/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *pwqeq,
                                 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
        union lpfc_wqe128 *wqe;

        /* IREAD template */
        wqe = &lpfc_iread_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* IWRITE template */
        wqe = &lpfc_iwrite_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - cmd_buff_len, payload_offset_len is zero */

        /* Word 4 - total_xfer_len is variable */

        /* Word 5 - initial_xfer_len is variable */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
        bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
        bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
        bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

        /* Word 11 - pbde is variable */
        bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
        bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

        /* Word 12 - is zero */

        /* Word 13, 14, 15 - PBDE is variable */

        /* ICMND template */
        wqe = &lpfc_icmnd_cmd_template;
        memset(wqe, 0, sizeof(union lpfc_wqe128));

        /* Word 0, 1, 2 - BDE is variable */

        /* Word 3 - payload_offset_len is variable */

        /* Word 4, 5 - is zero */

        /* Word 6 - ctxt_tag, xri_tag is variable */

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
        bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
        bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

        /* Word 8 - abort_tag is variable */

        /* Word 9  - reqtag is variable */

        /* Word 10 - dbde, wqes is variable */
        bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
        bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /* Word 11 */
        bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
        bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

        /* Word 12, 13, 14, 15 - is zero */
}
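
/*
 * Illustrative sketch (not driver code): a template above is meant to be
 * bulk-copied into a per-IO WQE so that only the "variable" words need to
 * be filled in on the hot path. A hypothetical consumer, where xfer_len,
 * xri_tag and reqtag are placeholders for per-IO values:
 *
 *	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	wqe->fcp_iread.total_xfer_len = xfer_len;		// Word 4
 *	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xri_tag);	// Word 6
 *	bf_set(wqe_reqtag, &wqe->generic.wqe_com, reqtag);	// Word 9
 *
 * The constant command/class/CT bits never have to be re-derived per IO.
 */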

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
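
/*
 * Usage sketch (illustrative only): the fast path above moves 64 bits at a
 * time with no byte swapping, so callers must pass a byte count that is a
 * multiple of 8 and rely on host and SLI endianness agreeing. Copying one
 * queue entry into its slot looks like:
 *
 *	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 *
 * where q->entry_size is typically 64 or 128 bytes for a WQ. On big-endian
 * or 32-bit builds the macro falls back to lpfc_sli_pcimem_bcopy(), which
 * converts each 32-bit word individually.
 */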

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue memory is invalid, or -EBUSY if no
 * entries are available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;

        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                        q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                            q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                            q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
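
/*
 * Worked example (not driver code): the fullness test above implements a
 * classic ring buffer that keeps one slot unused so that full and empty
 * states are distinguishable. With entry_count = 8, host_index = 6 and
 * hba_index = 7:
 *
 *	idx = (6 + 1) % 8;	// == 7 == hba_index -> queue full, -EBUSY
 *
 * With hba_index = 0 instead, the same post succeeds and host_index
 * advances to 7. Empty is host_index == hba_index; full is
 * (host_index + 1) % entry_count == hba_index.
 */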

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return;

        q->hba_index = index;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
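
/*
 * Ordering sketch (illustrative): the mb() above enforces "check the valid
 * bit, then copy". That matters because the CPU may otherwise speculate the
 * copy first and read a half-written entry. valid() and process() below are
 * pseudocode placeholders:
 *
 *	eqe = lpfc_sli4_qe(q, q->host_index);	// 1. locate the slot
 *	if (!valid(eqe)) return NULL;		// 2. hardware done with it?
 *	mb();					// 3. fence the check
 *	process(eqe);				// 4. only now read contents
 *
 * Without step 3, step 4 can be hoisted above step 2 and observe stale data.
 */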

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on.
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                     uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
                        (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}
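
/*
 * Typical call pattern (sketch): batch-release consumed EQEs without
 * re-arming while still polling, then arm once on the way out so the next
 * event raises an interrupt:
 *
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);
 *	// ...keep processing...
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 *
 * lpfc_sli4_process_eq() below follows this shape through the
 * sli4_write_eq_db function pointer.
 */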

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
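
/*
 * Valid-bit toggling, worked through (illustrative): on EQ-autovalid
 * hardware (eqav set) the host never clears entries. Instead the sense of
 * "valid" flips each time host_index wraps. With entry_count = 4 and
 * qe_valid starting at 1:
 *
 *	pass 1: entries 0..3 are valid when their valid bit == 1
 *	wrap:   host_index -> 0, qe_valid toggles to 0
 *	pass 2: entries 0..3 are valid when their valid bit == 0
 *
 * Stale entries left over from the previous pass automatically read as
 * "not valid" without any write from the host. Non-eqav hardware takes the
 * first branch above and clears the bit explicitly.
 */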

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                            LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        xchg(&eq->queue_claimed, 0);

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}
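
/*
 * Claim/release sketch (illustrative): the cmpxchg() on queue_claimed above
 * gives a lockless guarantee that only one context drains an EQ at a time:
 *
 *	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
 *		goto rearm_and_exit;	// someone else is already draining
 *	// ...drain entries...
 *	xchg(&eq->queue_claimed, 0);	// release, with a full barrier
 *
 * The loser of the race does not spin waiting for the winner; it simply
 * rings the doorbell with consumed == 0 and exits, which is why
 * lpfc_sli4_process_eq() jumps straight to rearm_and_exit.
 */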

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                     uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                         uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the entries were copied to
 * if successful, -ENOMEM if the queue memory is invalid, -EINVAL on a queue
 * type or index mismatch, or -EBUSY if no entries are available.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
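
/*
 * Usage sketch (illustrative): receive buffers are always posted in
 * header/data pairs, one RQE on each queue at the same index. A hypothetical
 * caller repopulating one buffer pair, where hbuf_dma and dbuf_dma are
 * placeholder DMA addresses:
 *
 *	struct lpfc_rqe hrqe, drqe;
 *	int rc;
 *
 *	hrqe.address_lo = putPaddrLow(hbuf_dma);
 *	hrqe.address_hi = putPaddrHigh(hbuf_dma);
 *	drqe.address_lo = putPaddrLow(dbuf_dma);
 *	drqe.address_hi = putPaddrHigh(dbuf_dma);
 *	rc = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *
 * The doorbell is only rung every hq->notify_interval postings, so a
 * negative return other than -EBUSY indicates a configuration error.
 */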

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        /* Look up the DID to verify it is still active on this vport */
        if (rrq->vport)
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks each rrq on the
 * phba->active_rrq_list to see if stop_time (ratov from setting
 * rrq active) has been reached. If it has and the send_rrq flag is
 * set then it will call lpfc_send_rrq. If the send_rrq flag is not
 * set then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                                rrq->nlp_DID == did){
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL, then only remove the rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport != vport)
                        continue;

                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
                        list_move(&rrq->list, &rrq_list);

        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                        uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri,
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                                msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
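
/*
 * Locking sketch (illustrative): note the pattern above of dropping the
 * hbalock around the mempool allocation and re-taking it to insert into
 * the active list:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	// ...test-and-set the xri bit...
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);	// no lock held
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&rrq->list, &phba->active_rrq_list);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * The xri bit set before the first unlock is what keeps a second caller
 * from racing in and queueing a duplicate rrq for the same exchange.
 */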

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        int found = 0;
        u8 cmnd;

        cmnd = get_job_cmnd(phba, piocbq);

        if (piocbq->cmd_flag & LPFC_IO_FCP) {
                lpfc_cmd = piocbq->io_buf;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
                        !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->ndlp;
        } else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
                if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->ndlp;
        } else {
                ndlp = piocbq->ndlp;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                    ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                                struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}
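
/*
 * Search sketch (illustrative pseudocode): the loop above is a bounded
 * round-robin over the free sgl list. Each candidate whose XRI still has
 * an RRQ pending for this DID goes back to the tail and the next head is
 * tried; popping the starting sglq again means every free XRI is blocked:
 *
 *	start = pop_head(list);
 *	for (sglq = start; sglq != NULL; ) {
 *		if (!rrq_active(ndlp, sglq->sli4_lxritag))
 *			break;			// usable XRI found
 *		push_tail(list, sglq);		// skip, keep it available
 *		sglq = pop_head(list);
 *		if (sglq == start) {		// wrapped all the way around
 *			push_tail(list, sglq);
 *			sglq = NULL;		// every free XRI is blocked
 *		}
 *	}
 *
 * pop_head/push_tail/rrq_active stand in for the list_remove_head(),
 * list_add_tail() and test_bit() calls in the real routine.
 */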

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

        if (sglq) {
                if (iocbq->cmd_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
                    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
                    sglq->state != SGL_XRI_ABORTED) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);

                        /* Check if we can get a reference on ndlp */
                        if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
                                sglq->ndlp = NULL;

                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        pring = lpfc_phba_elsring(phba);
                        /* Check if TXQ queue needs to be serviced */
                        if (pring && (!list_empty(&pring->txq)))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset_startat(iocbq, 0, wqe);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
1450  **/
1451 static void
1452 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1453 {
1454
1455         /*
1456          * Clean all volatile data fields, preserve iotag and node struct.
1457          */
1458         memset_startat(iocbq, 0, iocb);
1459         iocbq->sli4_xritag = NO_XRI;
1460         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1461 }
1462
1463 /**
1464  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1465  * @phba: Pointer to HBA context object.
1466  * @iocbq: Pointer to driver iocb object.
1467  *
1468  * This function is called with hbalock held to release driver
1469  * iocb object to the iocb pool. The iotag in the iocb object
1470  * does not change for each use of the iocb object. This function
1471  * clears all other fields of the iocb object when it is freed.
1472  **/
1473 static void
1474 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1475 {
1476         lockdep_assert_held(&phba->hbalock);
1477
1478         phba->__lpfc_sli_release_iocbq(phba, iocbq);
1479         phba->iocb_cnt--;
1480 }
1481
1482 /**
1483  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1484  * @phba: Pointer to HBA context object.
1485  * @iocbq: Pointer to driver iocb object.
1486  *
1487  * This function is called with no lock held to release the iocb to
1488  * iocb pool.
1489  **/
1490 void
1491 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1492 {
1493         unsigned long iflags;
1494
1495         /*
1496          * Clean all volatile data fields, preserve iotag and node struct.
1497          */
1498         spin_lock_irqsave(&phba->hbalock, iflags);
1499         __lpfc_sli_release_iocbq(phba, iocbq);
1500         spin_unlock_irqrestore(&phba->hbalock, iflags);
1501 }
1502
1503 /**
1504  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1505  * @phba: Pointer to HBA context object.
1506  * @iocblist: List of IOCBs.
1507  * @ulpstatus: ULP status in IOCB command field.
1508  * @ulpWord4: ULP word-4 in IOCB command field.
1509  *
1510  * This function is called with a list of IOCBs to cancel. It cancels each
1511  * IOCB on the list by invoking the completion callback function associated
1512  * with the IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB
1513  * command fields.
1514  **/
1515 void
1516 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1517                       uint32_t ulpstatus, uint32_t ulpWord4)
1518 {
1519         struct lpfc_iocbq *piocb;
1520
1521         while (!list_empty(iocblist)) {
1522                 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1523                 if (piocb->cmd_cmpl) {
1524                         if (piocb->cmd_flag & LPFC_IO_NVME) {
1525                                 lpfc_nvme_cancel_iocb(phba, piocb,
1526                                                       ulpstatus, ulpWord4);
1527                         } else {
1528                                 if (phba->sli_rev == LPFC_SLI_REV4) {
1529                                         bf_set(lpfc_wcqe_c_status,
1530                                                &piocb->wcqe_cmpl, ulpstatus);
1531                                         piocb->wcqe_cmpl.parameter = ulpWord4;
1532                                 } else {
1533                                         piocb->iocb.ulpStatus = ulpstatus;
1534                                         piocb->iocb.un.ulpWord[4] = ulpWord4;
1535                                 }
1536                                 (piocb->cmd_cmpl) (phba, piocb, piocb);
1537                         }
1538                 } else {
1539                         lpfc_sli_release_iocbq(phba, piocb);
1540                 }
1541         }
1542         return;
1543 }
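
/*
 * Example (illustrative sketch): a typical caller splices pending iocbs
 * onto a local list under the appropriate lock and then fails them all
 * with one status pair, for instance:
 *
 *	LIST_HEAD(completions);
 *
 *	... move txq/txcmplq entries onto completions under the ring lock ...
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */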
1544
1545 /**
1546  * lpfc_sli_iocb_cmd_type - Get the iocb type
1547  * @iocb_cmnd: iocb command code.
1548  *
1549  * This function is called by ring event handler function to get the iocb type.
1550  * This function translates the iocb command to an iocb command type used to
1551  * decide the final disposition of each completed IOCB.
1552  * The function returns
1553  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1554  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1555  * LPFC_ABORT_IOCB   if it is an abort iocb
1556  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1557  *
1558  * The caller is not required to hold any lock.
1559  **/
1560 static lpfc_iocb_type
1561 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1562 {
1563         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1564
1565         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1566                 return LPFC_UNKNOWN_IOCB;
1567
1568         switch (iocb_cmnd) {
1569         case CMD_XMIT_SEQUENCE_CR:
1570         case CMD_XMIT_SEQUENCE_CX:
1571         case CMD_XMIT_BCAST_CN:
1572         case CMD_XMIT_BCAST_CX:
1573         case CMD_ELS_REQUEST_CR:
1574         case CMD_ELS_REQUEST_CX:
1575         case CMD_CREATE_XRI_CR:
1576         case CMD_CREATE_XRI_CX:
1577         case CMD_GET_RPI_CN:
1578         case CMD_XMIT_ELS_RSP_CX:
1579         case CMD_GET_RPI_CR:
1580         case CMD_FCP_IWRITE_CR:
1581         case CMD_FCP_IWRITE_CX:
1582         case CMD_FCP_IREAD_CR:
1583         case CMD_FCP_IREAD_CX:
1584         case CMD_FCP_ICMND_CR:
1585         case CMD_FCP_ICMND_CX:
1586         case CMD_FCP_TSEND_CX:
1587         case CMD_FCP_TRSP_CX:
1588         case CMD_FCP_TRECEIVE_CX:
1589         case CMD_FCP_AUTO_TRSP_CX:
1590         case CMD_ADAPTER_MSG:
1591         case CMD_ADAPTER_DUMP:
1592         case CMD_XMIT_SEQUENCE64_CR:
1593         case CMD_XMIT_SEQUENCE64_CX:
1594         case CMD_XMIT_BCAST64_CN:
1595         case CMD_XMIT_BCAST64_CX:
1596         case CMD_ELS_REQUEST64_CR:
1597         case CMD_ELS_REQUEST64_CX:
1598         case CMD_FCP_IWRITE64_CR:
1599         case CMD_FCP_IWRITE64_CX:
1600         case CMD_FCP_IREAD64_CR:
1601         case CMD_FCP_IREAD64_CX:
1602         case CMD_FCP_ICMND64_CR:
1603         case CMD_FCP_ICMND64_CX:
1604         case CMD_FCP_TSEND64_CX:
1605         case CMD_FCP_TRSP64_CX:
1606         case CMD_FCP_TRECEIVE64_CX:
1607         case CMD_GEN_REQUEST64_CR:
1608         case CMD_GEN_REQUEST64_CX:
1609         case CMD_XMIT_ELS_RSP64_CX:
1610         case DSSCMD_IWRITE64_CR:
1611         case DSSCMD_IWRITE64_CX:
1612         case DSSCMD_IREAD64_CR:
1613         case DSSCMD_IREAD64_CX:
1614         case CMD_SEND_FRAME:
1615                 type = LPFC_SOL_IOCB;
1616                 break;
1617         case CMD_ABORT_XRI_CN:
1618         case CMD_ABORT_XRI_CX:
1619         case CMD_CLOSE_XRI_CN:
1620         case CMD_CLOSE_XRI_CX:
1621         case CMD_XRI_ABORTED_CX:
1622         case CMD_ABORT_MXRI64_CN:
1623         case CMD_XMIT_BLS_RSP64_CX:
1624                 type = LPFC_ABORT_IOCB;
1625                 break;
1626         case CMD_RCV_SEQUENCE_CX:
1627         case CMD_RCV_ELS_REQ_CX:
1628         case CMD_RCV_SEQUENCE64_CX:
1629         case CMD_RCV_ELS_REQ64_CX:
1630         case CMD_ASYNC_STATUS:
1631         case CMD_IOCB_RCV_SEQ64_CX:
1632         case CMD_IOCB_RCV_ELS64_CX:
1633         case CMD_IOCB_RCV_CONT64_CX:
1634         case CMD_IOCB_RET_XRI64_CX:
1635                 type = LPFC_UNSOL_IOCB;
1636                 break;
1637         case CMD_IOCB_XMIT_MSEQ64_CR:
1638         case CMD_IOCB_XMIT_MSEQ64_CX:
1639         case CMD_IOCB_RCV_SEQ_LIST64_CX:
1640         case CMD_IOCB_RCV_ELS_LIST64_CX:
1641         case CMD_IOCB_CLOSE_EXTENDED_CN:
1642         case CMD_IOCB_ABORT_EXTENDED_CN:
1643         case CMD_IOCB_RET_HBQE64_CN:
1644         case CMD_IOCB_FCP_IBIDIR64_CR:
1645         case CMD_IOCB_FCP_IBIDIR64_CX:
1646         case CMD_IOCB_FCP_ITASKMGT64_CX:
1647         case CMD_IOCB_LOGENTRY_CN:
1648         case CMD_IOCB_LOGENTRY_ASYNC_CN:
1649                 printk("%s - Unhandled SLI-3 Command x%x\n",
1650                                 __func__, iocb_cmnd);
1651                 type = LPFC_UNKNOWN_IOCB;
1652                 break;
1653         default:
1654                 type = LPFC_UNKNOWN_IOCB;
1655                 break;
1656         }
1657
1658         return type;
1659 }
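
/*
 * Example (illustrative sketch): a ring event handler is expected to
 * dispatch on the returned type, roughly as below; irsp stands in for
 * the hypothetical response iocb being processed:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:
 *		... match the completion to a txcmplq entry ...
 *	case LPFC_UNSOL_IOCB:
 *		... hand the sequence to the upper layer ...
 *	case LPFC_ABORT_IOCB:
 *		... complete the aborted exchange ...
 *	default:
 *		... log and drop ...
 *	}
 */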
1660
1661 /**
1662  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1663  * @phba: Pointer to HBA context object.
1664  *
1665  * This function is called from SLI initialization code
1666  * to configure every ring of the HBA's SLI interface. The
1667  * caller is not required to hold any lock. This function issues
1668  * a config_ring mailbox command for each ring.
1669  * This function returns zero if successful else returns a negative
1670  * error code.
1671  **/
1672 static int
1673 lpfc_sli_ring_map(struct lpfc_hba *phba)
1674 {
1675         struct lpfc_sli *psli = &phba->sli;
1676         LPFC_MBOXQ_t *pmb;
1677         MAILBOX_t *pmbox;
1678         int i, rc, ret = 0;
1679
1680         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1681         if (!pmb)
1682                 return -ENOMEM;
1683         pmbox = &pmb->u.mb;
1684         phba->link_state = LPFC_INIT_MBX_CMDS;
1685         for (i = 0; i < psli->num_rings; i++) {
1686                 lpfc_config_ring(phba, i, pmb);
1687                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1688                 if (rc != MBX_SUCCESS) {
1689                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1690                                         "0446 Adapter failed to init (%d), "
1691                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1692                                         "ring %d\n",
1693                                         rc, pmbox->mbxCommand,
1694                                         pmbox->mbxStatus, i);
1695                         phba->link_state = LPFC_HBA_ERROR;
1696                         ret = -ENXIO;
1697                         break;
1698                 }
1699         }
1700         mempool_free(pmb, phba->mbox_mem_pool);
1701         return ret;
1702 }
1703
1704 /**
1705  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1706  * @phba: Pointer to HBA context object.
1707  * @pring: Pointer to driver SLI ring object.
1708  * @piocb: Pointer to the driver iocb object.
1709  *
1710  * The driver calls this function with the hbalock held for SLI3 ports or
1711  * the ring lock held for SLI4 ports. The function adds the
1712  * new iocb to txcmplq of the given ring. This function always returns
1713  * 0. If this function is called for ELS ring, this function checks if
1714  * there is a vport associated with the ELS command. This function also
1715  * starts els_tmofunc timer if this is an ELS command.
1716  **/
1717 static int
1718 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1719                         struct lpfc_iocbq *piocb)
1720 {
1721         u32 ulp_command = 0;
1722
1723         BUG_ON(!piocb);
1724         ulp_command = get_job_cmnd(phba, piocb);
1725
1726         list_add_tail(&piocb->list, &pring->txcmplq);
1727         piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1728         pring->txcmplq_cnt++;
1729         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1730            (ulp_command != CMD_ABORT_XRI_WQE) &&
1731            (ulp_command != CMD_ABORT_XRI_CN) &&
1732            (ulp_command != CMD_CLOSE_XRI_CN)) {
1733                 BUG_ON(!piocb->vport);
1734                 if (!(piocb->vport->load_flag & FC_UNLOADING))
1735                         mod_timer(&piocb->vport->els_tmofunc,
1736                                   jiffies +
1737                                   msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1738         }
1739
1740         return 0;
1741 }
1742
1743 /**
1744  * lpfc_sli_ringtx_get - Get first element of the txq
1745  * @phba: Pointer to HBA context object.
1746  * @pring: Pointer to driver SLI ring object.
1747  *
1748  * This function is called with hbalock held to get the next
1749  * iocb in the txq of the given ring. If there is any iocb in
1750  * the txq, the function returns the first iocb in the list after
1751  * removing it from the list, else it returns NULL.
1752  **/
1753 struct lpfc_iocbq *
1754 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1755 {
1756         struct lpfc_iocbq *cmd_iocb;
1757
1758         lockdep_assert_held(&phba->hbalock);
1759
1760         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1761         return cmd_iocb;
1762 }
1763
1764 /**
1765  * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1766  * @phba: Pointer to HBA context object.
1767  * @cmdiocb: Pointer to driver command iocb object.
1768  * @rspiocb: Pointer to driver response iocb object.
1769  *
1770  * This routine will inform the driver of any BW adjustments we need
1771  * to make. These changes will be picked up during the next CMF
1772  * timer interrupt. In addition, any BW changes will be logged
1773  * with LOG_CGN_MGMT.
1774  **/
1775 static void
1776 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1777                    struct lpfc_iocbq *rspiocb)
1778 {
1779         union lpfc_wqe128 *wqe;
1780         uint32_t status, info;
1781         struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1782         uint64_t bw, bwdif, slop;
1783         uint64_t pcent, bwpcent;
1784         int asig, afpin, sigcnt, fpincnt;
1785         int wsigmax, wfpinmax, cg, tdp;
1786         char *s;
1787
1788         /* First check for error */
1789         status = bf_get(lpfc_wcqe_c_status, wcqe);
1790         if (status) {
1791                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1792                                 "6211 CMF_SYNC_WQE Error "
1793                                 "req_tag x%x status x%x hwstatus x%x "
1794                                 "tdatap x%x parm x%x\n",
1795                                 bf_get(lpfc_wcqe_c_request_tag, wcqe),
1796                                 bf_get(lpfc_wcqe_c_status, wcqe),
1797                                 bf_get(lpfc_wcqe_c_hw_status, wcqe),
1798                                 wcqe->total_data_placed,
1799                                 wcqe->parameter);
1800                 goto out;
1801         }
1802
1803         /* Gather congestion information on a successful cmpl */
1804         info = wcqe->parameter;
1805         phba->cmf_active_info = info;
1806
1807         /* See if firmware info count is valid or has changed */
1808         if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1809                 info = 0;
1810         else
1811                 phba->cmf_info_per_interval = info;
1812
1813         tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1814         cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1815
1816         /* Get BW requirement from firmware */
1817         bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1818         if (!bw) {
1819                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1820                                 "6212 CMF_SYNC_WQE x%x: NULL bw\n",
1821                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
1822                 goto out;
1823         }
1824
1825         /* Gather information needed for logging if a BW change is required */
1826         wqe = &cmdiocb->wqe;
1827         asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1828         afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1829         fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1830         sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1831         if (phba->cmf_max_bytes_per_interval != bw ||
1832             (asig || afpin || sigcnt || fpincnt)) {
1833                 /* Are we increasing or decreasing BW */
1834                 if (phba->cmf_max_bytes_per_interval <  bw) {
1835                         bwdif = bw - phba->cmf_max_bytes_per_interval;
1836                         s = "Increase";
1837                 } else {
1838                         bwdif = phba->cmf_max_bytes_per_interval - bw;
1839                         s = "Decrease";
1840                 }
1841
1842                 /* What is the change percentage */
1843                 slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1844                 pcent = div64_u64(bwdif * 100 + slop,
1845                                   phba->cmf_link_byte_count);
1846                 bwpcent = div64_u64(bw * 100 + slop,
1847                                     phba->cmf_link_byte_count);
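                /* Worked example (illustrative figures only): with
                 * cmf_link_byte_count = 1,000,000, slop = 5,000; a bwdif
                 * of 250,000 then yields
                 * pcent = (25,000,000 + 5,000) / 1,000,000 = 25, the slop
                 * nudging the truncating division upward at the boundary.
                 */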
1848                 /* Because of the bytes adjustment for a shorter timer in
1849                  * lpfc_cmf_timer(), cmf_link_byte_count can be reduced and
1850                  * make it appear that BW is above 100%.
1851                  */
1852                 if (bwpcent > 100)
1853                         bwpcent = 100;
1854
1855                 if (phba->cmf_max_bytes_per_interval < bw &&
1856                     bwpcent > 95)
1857                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1858                                         "6208 Congestion bandwidth "
1859                                         "limits removed\n");
1860                 else if ((phba->cmf_max_bytes_per_interval > bw) &&
1861                          ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
1862                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1863                                         "6209 Congestion bandwidth "
1864                                         "limits in effect\n");
1865
1866                 if (asig) {
1867                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1868                                         "6237 BW Threshold %lld%% (%lld): "
1869                                         "%lld%% %s: Signal Alarm: cg:%d "
1870                                         "Info:%u\n",
1871                                         bwpcent, bw, pcent, s, cg,
1872                                         phba->cmf_active_info);
1873                 } else if (afpin) {
1874                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1875                                         "6238 BW Threshold %lld%% (%lld): "
1876                                         "%lld%% %s: FPIN Alarm: cg:%d "
1877                                         "Info:%u\n",
1878                                         bwpcent, bw, pcent, s, cg,
1879                                         phba->cmf_active_info);
1880                 } else if (sigcnt) {
1881                         wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1882                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1883                                         "6239 BW Threshold %lld%% (%lld): "
1884                                         "%lld%% %s: Signal Warning: "
1885                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1886                                         bwpcent, bw, pcent, s, sigcnt,
1887                                         wsigmax, cg, phba->cmf_active_info);
1888                 } else if (fpincnt) {
1889                         wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1890                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1891                                         "6240 BW Threshold %lld%% (%lld): "
1892                                         "%lld%% %s: FPIN Warning: "
1893                                         "Cnt %d Max %d: cg:%d Info:%u\n",
1894                                         bwpcent, bw, pcent, s, fpincnt,
1895                                         wfpinmax, cg, phba->cmf_active_info);
1896                 } else {
1897                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1898                                         "6241 BW Threshold %lld%% (%lld): "
1899                                         "CMF %lld%% %s: cg:%d Info:%u\n",
1900                                         bwpcent, bw, pcent, s, cg,
1901                                         phba->cmf_active_info);
1902                 }
1903         } else if (info) {
1904                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1905                                 "6246 Info Threshold %u\n", info);
1906         }
1907
1908         /* Save BW change to be picked up during next timer interrupt */
1909         phba->cmf_last_sync_bw = bw;
1910 out:
1911         lpfc_sli_release_iocbq(phba, cmdiocb);
1912 }
1913
1914 /**
1915  * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1916  * @phba: Pointer to HBA context object.
1917  * @ms:   ms to set in WQE interval, 0 means use init op
1918  * @total: Total rcv bytes for this interval
1919  *
1920  * This routine is called every CMF timer interrupt. Its purpose is
1921  * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1922  * that may indicate we have congestion (FPINs or Signals). Upon
1923  * completion, the firmware will indicate any BW restrictions the
1924  * driver may need to take.
1925  **/
1926 int
1927 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1928 {
1929         union lpfc_wqe128 *wqe;
1930         struct lpfc_iocbq *sync_buf;
1931         unsigned long iflags;
1932         u32 ret_val;
1933         u32 atot, wtot, max;
1934         u16 warn_sync_period = 0;
1935
1936         /* First address any alarm / warning activity */
1937         atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1938         wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1939
1940         /* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1941         if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1942             phba->link_state == LPFC_LINK_DOWN)
1943                 return 0;
1944
1945         spin_lock_irqsave(&phba->hbalock, iflags);
1946         sync_buf = __lpfc_sli_get_iocbq(phba);
1947         if (!sync_buf) {
1948                 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1949                                 "6244 No available WQEs for CMF_SYNC_WQE\n");
1950                 ret_val = ENOMEM;
1951                 goto out_unlock;
1952         }
1953
1954         wqe = &sync_buf->wqe;
1955
1956         /* WQEs are reused.  Clear stale data and set key fields to zero */
1957         memset(wqe, 0, sizeof(*wqe));
1958
1959         /* If this is the very first CMF_SYNC_WQE, issue an init operation */
1960         if (!ms) {
1961                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1962                                 "6441 CMF Init %d - CMF_SYNC_WQE\n",
1963                                 phba->fc_eventTag);
1964                 bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1965                 bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1966                 goto initpath;
1967         }
1968
1969         bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1970         bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1971
1972         /* Check for alarms / warnings */
1973         if (atot) {
1974                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1975                         /* We hit a Signal alarm condition */
1976                         bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1977                 } else {
1978                         /* We hit a FPIN alarm condition */
1979                         bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1980                 }
1981         } else if (wtot) {
1982                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1983                     phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1984                         /* We hit a Signal warning condition */
1985                         max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1986                                 lpfc_acqe_cgn_frequency;
1987                         bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1988                         bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1989                         warn_sync_period = lpfc_acqe_cgn_frequency;
1990                 } else {
1991                         /* We hit a FPIN warning condition */
1992                         bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1993                         bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1994                         if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
1995                                 warn_sync_period =
1996                                 LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
1997                 }
1998         }
1999
2000         /* Update total read blocks during previous timer interval */
2001         wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
2002
2003 initpath:
2004         bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
2005         wqe->cmf_sync.event_tag = phba->fc_eventTag;
2006         bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
2007
2008         /* Setup reqtag to match the wqe completion. */
2009         bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2010
2011         bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2012         bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
2013
2014         bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2015         bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2016         bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2017
2018         sync_buf->vport = phba->pport;
2019         sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2020         sync_buf->cmd_dmabuf = NULL;
2021         sync_buf->rsp_dmabuf = NULL;
2022         sync_buf->bpl_dmabuf = NULL;
2023         sync_buf->sli4_xritag = NO_XRI;
2024
2025         sync_buf->cmd_flag |= LPFC_IO_CMF;
2026         ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2027         if (ret_val) {
2028                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2029                                 "6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2030                                 ret_val);
2031                 __lpfc_sli_release_iocbq(phba, sync_buf);
2032         }
2033 out_unlock:
2034         spin_unlock_irqrestore(&phba->hbalock, iflags);
2035         return ret_val;
2036 }
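
/*
 * Example (illustrative sketch): per the description above, the CMF timer
 * path would call this once per interval with the elapsed milliseconds and
 * the bytes received during that interval ("total" is hypothetical here):
 *
 *	lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
 *
 * while the very first invocation passes ms = 0 to request the init op.
 */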
2037
2038 /**
2039  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2040  * @phba: Pointer to HBA context object.
2041  * @pring: Pointer to driver SLI ring object.
2042  *
2043  * This function is called with hbalock held and the caller must post the
2044  * iocb without releasing the lock. If the caller releases the lock,
2045  * iocb slot returned by the function is not guaranteed to be available.
2046  * The function returns a pointer to the next available iocb slot if there
2047  * is an available slot in the ring, else it returns NULL.
2048  * If the get index of the ring is ahead of the put index, the function
2049  * will post an error attention event to the worker thread to take the
2050  * HBA to offline state.
2051  **/
2052 static IOCB_t *
2053 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2054 {
2055         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2056         uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2057
2058         lockdep_assert_held(&phba->hbalock);
2059
2060         if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2061            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2062                 pring->sli.sli3.next_cmdidx = 0;
2063
2064         if (unlikely(pring->sli.sli3.local_getidx ==
2065                 pring->sli.sli3.next_cmdidx)) {
2066
2067                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2068
2069                 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2070                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2071                                         "0315 Ring %d issue: portCmdGet %d "
2072                                         "is bigger than cmd ring %d\n",
2073                                         pring->ringno,
2074                                         pring->sli.sli3.local_getidx,
2075                                         max_cmd_idx);
2076
2077                         phba->link_state = LPFC_HBA_ERROR;
2078                         /*
2079                          * All error attention handlers are posted to
2080                          * worker thread
2081                          */
2082                         phba->work_ha |= HA_ERATT;
2083                         phba->work_hs = HS_FFER3;
2084
2085                         lpfc_worker_wake_up(phba);
2086
2087                         return NULL;
2088                 }
2089
2090                 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2091                         return NULL;
2092         }
2093
2094         return lpfc_cmd_iocb(phba, pring);
2095 }
2096
2097 /**
2098  * lpfc_sli_next_iotag - Get an iotag for the iocb
2099  * @phba: Pointer to HBA context object.
2100  * @iocbq: Pointer to driver iocb object.
2101  *
2102  * This function gets an iotag for the iocb. If there is no unused iotag and
2103  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2104  * array and assigns a new iotag.
2105  * The function returns the allocated iotag if successful, else returns zero.
2106  * Zero is not a valid iotag.
2107  * The caller is not required to hold any lock.
2108  **/
2109 uint16_t
2110 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2111 {
2112         struct lpfc_iocbq **new_arr;
2113         struct lpfc_iocbq **old_arr;
2114         size_t new_len;
2115         struct lpfc_sli *psli = &phba->sli;
2116         uint16_t iotag;
2117
2118         spin_lock_irq(&phba->hbalock);
2119         iotag = psli->last_iotag;
2120         if (++iotag < psli->iocbq_lookup_len) {
2121                 psli->last_iotag = iotag;
2122                 psli->iocbq_lookup[iotag] = iocbq;
2123                 spin_unlock_irq(&phba->hbalock);
2124                 iocbq->iotag = iotag;
2125                 return iotag;
2126         } else if (psli->iocbq_lookup_len < (0xffff
2127                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2128                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2129                 spin_unlock_irq(&phba->hbalock);
2130                 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2131                                   GFP_KERNEL);
2132                 if (new_arr) {
2133                         spin_lock_irq(&phba->hbalock);
2134                         old_arr = psli->iocbq_lookup;
2135                         if (new_len <= psli->iocbq_lookup_len) {
2136                                 /* highly improbable case */
2137                                 kfree(new_arr);
2138                                 iotag = psli->last_iotag;
2139                                 if (++iotag < psli->iocbq_lookup_len) {
2140                                         psli->last_iotag = iotag;
2141                                         psli->iocbq_lookup[iotag] = iocbq;
2142                                         spin_unlock_irq(&phba->hbalock);
2143                                         iocbq->iotag = iotag;
2144                                         return iotag;
2145                                 }
2146                                 spin_unlock_irq(&phba->hbalock);
2147                                 return 0;
2148                         }
2149                         if (psli->iocbq_lookup)
2150                                 memcpy(new_arr, old_arr,
2151                                        ((psli->last_iotag  + 1) *
2152                                         sizeof(struct lpfc_iocbq *)));
2153                         psli->iocbq_lookup = new_arr;
2154                         psli->iocbq_lookup_len = new_len;
2155                         psli->last_iotag = iotag;
2156                         psli->iocbq_lookup[iotag] = iocbq;
2157                         spin_unlock_irq(&phba->hbalock);
2158                         iocbq->iotag = iotag;
2159                         kfree(old_arr);
2160                         return iotag;
2161                 }
2162         } else
2163                 spin_unlock_irq(&phba->hbalock);
2164
2165         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2166                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
2167                         psli->last_iotag);
2168
2169         return 0;
2170 }
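
/*
 * Example (illustrative sketch): zero is not a valid iotag, so callers
 * treat it as an allocation failure:
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		... fail; no lookup slot could be assigned ...
 *
 * Note the design choice above: the bigger lookup array is allocated with
 * the hbalock dropped, then rechecked under the lock in case another
 * thread grew the array first.
 */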
2171
2172 /**
2173  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2174  * @phba: Pointer to HBA context object.
2175  * @pring: Pointer to driver SLI ring object.
2176  * @iocb: Pointer to iocb slot in the ring.
2177  * @nextiocb: Pointer to driver iocb object which needs to be
2178  *            posted to firmware.
2179  *
2180  * This function is called to post a new iocb to the firmware. This
2181  * function copies the new iocb to ring iocb slot and updates the
2182  * ring pointers. It adds the new iocb to the txcmplq if there is
2183  * a completion callback for this iocb; otherwise the function will free the
2184  * iocb object. The hbalock is asserted held in the code path calling
2185  * this routine.
2186  **/
2187 static void
2188 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2189                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2190 {
2191         /*
2192          * Set up an iotag
2193          */
2194         nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2195
2196
2197         if (pring->ringno == LPFC_ELS_RING) {
2198                 lpfc_debugfs_slow_ring_trc(phba,
2199                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2200                         *(((uint32_t *) &nextiocb->iocb) + 4),
2201                         *(((uint32_t *) &nextiocb->iocb) + 6),
2202                         *(((uint32_t *) &nextiocb->iocb) + 7));
2203         }
2204
2205         /*
2206          * Issue iocb command to adapter
2207          */
2208         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
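        /*
         * Ensure the iocb copy above is visible to the HBA before the
         * cmdPutInx doorbell update at the end of this routine hands it
         * the new slot.
         */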
2209         wmb();
2210         pring->stats.iocb_cmd++;
2211
2212         /*
2213          * If there is no completion routine to call, we can release the
2214          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2215          * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2216          */
2217         if (nextiocb->cmd_cmpl)
2218                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2219         else
2220                 __lpfc_sli_release_iocbq(phba, nextiocb);
2221
2222         /*
2223          * Let the HBA know what IOCB slot will be the next one the
2224          * driver will put a command into.
2225          */
2226         pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2227         writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2228 }
2229
2230 /**
2231  * lpfc_sli_update_full_ring - Update the chip attention register
2232  * @phba: Pointer to HBA context object.
2233  * @pring: Pointer to driver SLI ring object.
2234  *
2235  * The caller is not required to hold any lock for calling this function.
2236  * This function updates the chip attention bits for the ring to inform firmware
2237  * that there is pending work to be done for this ring and requests an
2238  * interrupt when there is space available in the ring. This function is
2239  * called when the driver is unable to post more iocbs to the ring due
2240  * to unavailability of space in the ring.
2241  **/
2242 static void
2243 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2244 {
2245         int ringno = pring->ringno;
2246
2247         pring->flag |= LPFC_CALL_RING_AVAILABLE;
2248
2249         wmb();
2250
2251         /*
2252          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2253          * The HBA will tell us when an IOCB entry is available.
2254          */
2255         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2256         readl(phba->CAregaddr); /* flush */
2257
2258         pring->stats.iocb_cmd_full++;
2259 }
2260
2261 /**
2262  * lpfc_sli_update_ring - Update chip attention register
2263  * @phba: Pointer to HBA context object.
2264  * @pring: Pointer to driver SLI ring object.
2265  *
2266  * This function updates the chip attention register bit for the
2267  * given ring to inform HBA that there is more work to be done
2268  * in this ring. The caller is not required to hold any lock.
2269  **/
2270 static void
2271 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2272 {
2273         int ringno = pring->ringno;
2274
2275         /*
2276          * Tell the HBA that there is work to do in this ring.
2277          */
2278         if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2279                 wmb();
2280                 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2281                 readl(phba->CAregaddr); /* flush */
2282         }
2283 }
2284
2285 /**
2286  * lpfc_sli_resume_iocb - Process iocbs in the txq
2287  * @phba: Pointer to HBA context object.
2288  * @pring: Pointer to driver SLI ring object.
2289  *
2290  * This function is called with hbalock held to post pending iocbs
2291  * in the txq to the firmware. This function is called when the driver
2292  * detects space available in the ring.
2293  **/
2294 static void
2295 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2296 {
2297         IOCB_t *iocb;
2298         struct lpfc_iocbq *nextiocb;
2299
2300         lockdep_assert_held(&phba->hbalock);
2301
2302         /*
2303          * Check to see if:
2304          *  (a) there is anything on the txq to send
2305          *  (b) link is up
2306          *  (c) link attention events can be processed (fcp ring only)
2307          *  (d) IOCB processing is not blocked by the outstanding mbox command.
2308          */
2309
2310         if (lpfc_is_link_up(phba) &&
2311             (!list_empty(&pring->txq)) &&
2312             (pring->ringno != LPFC_FCP_RING ||
2313              phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2314
2315                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2316                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2317                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2318
2319                 if (iocb)
2320                         lpfc_sli_update_ring(phba, pring);
2321                 else
2322                         lpfc_sli_update_full_ring(phba, pring);
2323         }
2324
2325         return;
2326 }
2327
2328 /**
2329  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2330  * @phba: Pointer to HBA context object.
2331  * @hbqno: HBQ number.
2332  *
2333  * This function is called with hbalock held to get the next
2334  * available slot for the given HBQ. If there is free slot
2335  * available slot for the given HBQ. If there is a free slot
2336  * available for the HBQ it will return a pointer to the next available
2337  **/
2338 static struct lpfc_hbq_entry *
2339 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2340 {
2341         struct hbq_s *hbqp = &phba->hbqs[hbqno];
2342
2343         lockdep_assert_held(&phba->hbalock);
2344
2345         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2346             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2347                 hbqp->next_hbqPutIdx = 0;
2348
2349         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2350                 uint32_t raw_index = phba->hbq_get[hbqno];
2351                 uint32_t getidx = le32_to_cpu(raw_index);
2352
2353                 hbqp->local_hbqGetIdx = getidx;
2354
2355                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2356                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2357                                         "1802 HBQ %d: local_hbqGetIdx "
2358                                         "%u is >= hbqp->entry_count %u\n",
2359                                         hbqno, hbqp->local_hbqGetIdx,
2360                                         hbqp->entry_count);
2361
2362                         phba->link_state = LPFC_HBA_ERROR;
2363                         return NULL;
2364                 }
2365
2366                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2367                         return NULL;
2368         }
2369
2370         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2371                         hbqp->hbqPutIdx;
2372 }
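
/*
 * Worked example (illustrative figures only): with entry_count = 256 and
 * hbqPutIdx == next_hbqPutIdx == 255, the increment above wraps
 * next_hbqPutIdx to 0; if local_hbqGetIdx is also 0 the ring looks full,
 * so the routine rereads the get index from phba->hbq_get before giving
 * up and returning NULL.
 */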
2373
2374 /**
2375  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2376  * @phba: Pointer to HBA context object.
2377  *
2378  * This function is called with no lock held to free all the
2379  * hbq buffers while uninitializing the SLI interface. It also
2380  * frees the HBQ buffers returned by the firmware but not yet
2381  * processed by the upper layers.
2382  **/
2383 void
2384 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2385 {
2386         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2387         struct hbq_dmabuf *hbq_buf;
2388         unsigned long flags;
2389         int i, hbq_count;
2390
2391         hbq_count = lpfc_sli_hbq_count();
2392         /* Return all memory used by all HBQs */
2393         spin_lock_irqsave(&phba->hbalock, flags);
2394         for (i = 0; i < hbq_count; ++i) {
2395                 list_for_each_entry_safe(dmabuf, next_dmabuf,
2396                                 &phba->hbqs[i].hbq_buffer_list, list) {
2397                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2398                         list_del(&hbq_buf->dbuf.list);
2399                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2400                 }
2401                 phba->hbqs[i].buffer_count = 0;
2402         }
2403
2404         /* Mark the HBQs not in use */
2405         phba->hbq_in_use = 0;
2406         spin_unlock_irqrestore(&phba->hbalock, flags);
2407 }
2408
2409 /**
2410  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2411  * @phba: Pointer to HBA context object.
2412  * @hbqno: HBQ number.
2413  * @hbq_buf: Pointer to HBQ buffer.
2414  *
2415  * This function is called with the hbalock held to post a
2416  * hbq buffer to the firmware. If the function finds an empty
2417  * slot in the HBQ, it will post the buffer. The function will return
2418  * zero if it successfully posts the buffer, else it will
2419  * return an error.
2420  **/
2421 static int
2422 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2423                          struct hbq_dmabuf *hbq_buf)
2424 {
2425         lockdep_assert_held(&phba->hbalock);
2426         return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2427 }
2428
2429 /**
2430  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2431  * @phba: Pointer to HBA context object.
2432  * @hbqno: HBQ number.
2433  * @hbq_buf: Pointer to HBQ buffer.
2434  *
2435  * This function is called with the hbalock held to post a hbq buffer to the
2436  * firmware. If the function finds an empty slot in the HBQ, it will post the
2437  * buffer and place it on the hbq_buffer_list. The function will return zero if
2438  * it successfully posts the buffer, else it will return an error.
2439  **/
2440 static int
2441 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2442                             struct hbq_dmabuf *hbq_buf)
2443 {
2444         struct lpfc_hbq_entry *hbqe;
2445         dma_addr_t physaddr = hbq_buf->dbuf.phys;
2446
2447         lockdep_assert_held(&phba->hbalock);
2448         /* Get next HBQ entry slot to use */
2449         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2450         if (hbqe) {
2451                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2452
2453                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2454                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2455                 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2456                 hbqe->bde.tus.f.bdeFlags = 0;
2457                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2458                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2459                                 /* Sync SLIM */
2460                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2461                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2462                                 /* flush */
2463                 readl(phba->hbq_put + hbqno);
2464                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2465                 return 0;
2466         } else
2467                 return -ENOMEM;
2468 }
2469
2470 /**
2471  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2472  * @phba: Pointer to HBA context object.
2473  * @hbqno: HBQ number.
2474  * @hbq_buf: Pointer to HBQ buffer.
2475  *
2476  * This function is called with the hbalock held to post an RQE to the SLI4
2477  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2478  * the hbq_buffer_list and return zero, otherwise it will return an error.
2479  **/
2480 static int
2481 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2482                             struct hbq_dmabuf *hbq_buf)
2483 {
2484         int rc;
2485         struct lpfc_rqe hrqe;
2486         struct lpfc_rqe drqe;
2487         struct lpfc_queue *hrq;
2488         struct lpfc_queue *drq;
2489
2490         if (hbqno != LPFC_ELS_HBQ)
2491                 return 1;
2492         hrq = phba->sli4_hba.hdr_rq;
2493         drq = phba->sli4_hba.dat_rq;
2494
2495         lockdep_assert_held(&phba->hbalock);
2496         hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2497         hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2498         drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2499         drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2500         rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2501         if (rc < 0)
2502                 return rc;
2503         hbq_buf->tag = (rc | (hbqno << 16));
2504         list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2505         return 0;
2506 }
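
/*
 * Example (illustrative sketch): the tag packed above carries the HBQ
 * number in the upper 16 bits and the RQE put index returned by
 * lpfc_sli4_rq_put() in the lower 16 bits, so a consumer such as
 * lpfc_sli_hbqbuf_find() can recover the queue with:
 *
 *	hbqno = tag >> 16;
 */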
2507
2508 /* HBQ for ELS and CT traffic. */
2509 static struct lpfc_hbq_init lpfc_els_hbq = {
2510         .rn = 1,
2511         .entry_count = 256,
2512         .mask_count = 0,
2513         .profile = 0,
2514         .ring_mask = (1 << LPFC_ELS_RING),
2515         .buffer_count = 0,
2516         .init_count = 40,
2517         .add_count = 40,
2518 };
2519
2520 /* Array of HBQs */
2521 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2522         &lpfc_els_hbq,
2523 };
2524
2525 /**
2526  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2527  * @phba: Pointer to HBA context object.
2528  * @hbqno: HBQ number.
2529  * @count: Number of HBQ buffers to be posted.
2530  *
2531  * This function is called with no lock held to post more hbq buffers to the
2532  * given HBQ. The function returns the number of HBQ buffers successfully
2533  * posted.
2534  **/
2535 static int
2536 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2537 {
2538         uint32_t i, posted = 0;
2539         unsigned long flags;
2540         struct hbq_dmabuf *hbq_buffer;
2541         LIST_HEAD(hbq_buf_list);
2542         if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2543                 return 0;
2544
2545         if ((phba->hbqs[hbqno].buffer_count + count) >
2546             lpfc_hbq_defs[hbqno]->entry_count)
2547                 count = lpfc_hbq_defs[hbqno]->entry_count -
2548                                         phba->hbqs[hbqno].buffer_count;
2549         if (!count)
2550                 return 0;
2551         /* Allocate HBQ entries */
2552         for (i = 0; i < count; i++) {
2553                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2554                 if (!hbq_buffer)
2555                         break;
2556                 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2557         }
2558         /* Check whether HBQ is still in use */
2559         spin_lock_irqsave(&phba->hbalock, flags);
2560         if (!phba->hbq_in_use)
2561                 goto err;
2562         while (!list_empty(&hbq_buf_list)) {
2563                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2564                                  dbuf.list);
2565                 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2566                                       (hbqno << 16));
2567                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2568                         phba->hbqs[hbqno].buffer_count++;
2569                         posted++;
2570                 } else
2571                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2572         }
2573         spin_unlock_irqrestore(&phba->hbalock, flags);
2574         return posted;
2575 err:
2576         spin_unlock_irqrestore(&phba->hbalock, flags);
2577         while (!list_empty(&hbq_buf_list)) {
2578                 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2579                                  dbuf.list);
2580                 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2581         }
2582         return 0;
2583 }
2584
2585 /**
2586  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2587  * @phba: Pointer to HBA context object.
2588  * @qno: HBQ number.
2589  *
2590  * This function posts more buffers to the HBQ. This function
2591  * is called with no lock held. The function returns the number of HBQ entries
2592  * successfully allocated.
2593  **/
2594 int
2595 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2596 {
2597         if (phba->sli_rev == LPFC_SLI_REV4)
2598                 return 0;
2599         else
2600                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2601                                          lpfc_hbq_defs[qno]->add_count);
2602 }
2603
2604 /**
2605  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2606  * @phba: Pointer to HBA context object.
2607  * @qno:  HBQ queue number.
2608  *
2609  * This function is called from SLI initialization code path with
2610  * no lock held to post initial HBQ buffers to firmware. The
2611  * function returns the number of HBQ entries successfully allocated.
2612  **/
2613 static int
2614 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2615 {
2616         if (phba->sli_rev == LPFC_SLI_REV4)
2617                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2618                                         lpfc_hbq_defs[qno]->entry_count);
2619         else
2620                 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2621                                          lpfc_hbq_defs[qno]->init_count);
2622 }
2623
2624 /*
2625  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2626  *
2627  * This function removes the first hbq buffer on an hbq list and returns a
2628  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2629  **/
2630 static struct hbq_dmabuf *
2631 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2632 {
2633         struct lpfc_dmabuf *d_buf;
2634
2635         list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2636         if (!d_buf)
2637                 return NULL;
2638         return container_of(d_buf, struct hbq_dmabuf, dbuf);
2639 }
2640
2641 /**
2642  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2643  * @phba: Pointer to HBA context object.
2644  * @hrq: Pointer to the header receive queue.
2645  *
2646  * This function removes the first RQ buffer on an RQ buffer list and returns a
2647  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2648  **/
2649 static struct rqb_dmabuf *
2650 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2651 {
2652         struct lpfc_dmabuf *h_buf;
2653         struct lpfc_rqb *rqbp;
2654
2655         rqbp = hrq->rqbp;
2656         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2657                          struct lpfc_dmabuf, list);
2658         if (!h_buf)
2659                 return NULL;
2660         rqbp->buffer_count--;
2661         return container_of(h_buf, struct rqb_dmabuf, hbuf);
2662 }
2663
2664 /**
2665  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2666  * @phba: Pointer to HBA context object.
2667  * @tag: Tag of the hbq buffer.
2668  *
2669  * This function searches for the hbq buffer associated with the given tag in
2670  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
2671  * otherwise it returns NULL.
2672  **/
2673 static struct hbq_dmabuf *
2674 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2675 {
2676         struct lpfc_dmabuf *d_buf;
2677         struct hbq_dmabuf *hbq_buf;
2678         uint32_t hbqno;
2679
2680         hbqno = tag >> 16;
2681         if (hbqno >= LPFC_MAX_HBQS)
2682                 return NULL;
2683
2684         spin_lock_irq(&phba->hbalock);
2685         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2686                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2687                 if (hbq_buf->tag == tag) {
2688                         spin_unlock_irq(&phba->hbalock);
2689                         return hbq_buf;
2690                 }
2691         }
2692         spin_unlock_irq(&phba->hbalock);
2693         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2694                         "1803 Bad hbq tag. Data: x%x x%x\n",
2695                         tag, phba->hbqs[tag >> 16].buffer_count);
2696         return NULL;
2697 }
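/*
 * The buffer tag packs the HBQ number into the upper 16 bits and a
 * per-queue sequence number into the lower 16 bits, which is why the
 * lookup above starts with `tag >> 16`. A hedged standalone sketch of
 * that packing; the demo_* helpers are illustrative, not the driver's
 * actual API.
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t demo_make_tag(uint32_t hbqno, uint32_t seq)
{
        return (hbqno << 16) | (seq & 0xFFFF);          /* queue in high half */
}

static inline uint32_t demo_tag_to_hbqno(uint32_t tag) { return tag >> 16; }
static inline uint32_t demo_tag_to_seq(uint32_t tag)   { return tag & 0xFFFF; }

int main(void)
{
        uint32_t tag = demo_make_tag(3, 0x00AB);

        assert(demo_tag_to_hbqno(tag) == 3);            /* selects queue 3 */
        assert(demo_tag_to_seq(tag) == 0x00AB);
        return 0;
}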
2698
2699 /**
2700  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2701  * @phba: Pointer to HBA context object.
2702  * @hbq_buffer: Pointer to HBQ buffer.
2703  *
2704  * This function is called with the hbalock held. It gives the hbq
2705  * buffer back to the firmware. If the HBQ does not have space to
2706  * post the buffer, it frees the buffer instead.
2707  **/
2708 void
2709 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2710 {
2711         uint32_t hbqno;
2712
2713         if (hbq_buffer) {
2714                 hbqno = hbq_buffer->tag >> 16;
2715                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2716                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2717         }
2718 }
2719
2720 /**
2721  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2722  * @mbxCommand: mailbox command code.
2723  *
2724  * This function is called by the mailbox event handler function to verify
2725  * that the completed mailbox command is a legitimate mailbox command. If the
2726  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2727  * and the mailbox event handler will take the HBA offline.
2728  **/
2729 static int
2730 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2731 {
2732         uint8_t ret;
2733
2734         switch (mbxCommand) {
2735         case MBX_LOAD_SM:
2736         case MBX_READ_NV:
2737         case MBX_WRITE_NV:
2738         case MBX_WRITE_VPARMS:
2739         case MBX_RUN_BIU_DIAG:
2740         case MBX_INIT_LINK:
2741         case MBX_DOWN_LINK:
2742         case MBX_CONFIG_LINK:
2743         case MBX_CONFIG_RING:
2744         case MBX_RESET_RING:
2745         case MBX_READ_CONFIG:
2746         case MBX_READ_RCONFIG:
2747         case MBX_READ_SPARM:
2748         case MBX_READ_STATUS:
2749         case MBX_READ_RPI:
2750         case MBX_READ_XRI:
2751         case MBX_READ_REV:
2752         case MBX_READ_LNK_STAT:
2753         case MBX_REG_LOGIN:
2754         case MBX_UNREG_LOGIN:
2755         case MBX_CLEAR_LA:
2756         case MBX_DUMP_MEMORY:
2757         case MBX_DUMP_CONTEXT:
2758         case MBX_RUN_DIAGS:
2759         case MBX_RESTART:
2760         case MBX_UPDATE_CFG:
2761         case MBX_DOWN_LOAD:
2762         case MBX_DEL_LD_ENTRY:
2763         case MBX_RUN_PROGRAM:
2764         case MBX_SET_MASK:
2765         case MBX_SET_VARIABLE:
2766         case MBX_UNREG_D_ID:
2767         case MBX_KILL_BOARD:
2768         case MBX_CONFIG_FARP:
2769         case MBX_BEACON:
2770         case MBX_LOAD_AREA:
2771         case MBX_RUN_BIU_DIAG64:
2772         case MBX_CONFIG_PORT:
2773         case MBX_READ_SPARM64:
2774         case MBX_READ_RPI64:
2775         case MBX_REG_LOGIN64:
2776         case MBX_READ_TOPOLOGY:
2777         case MBX_WRITE_WWN:
2778         case MBX_SET_DEBUG:
2779         case MBX_LOAD_EXP_ROM:
2780         case MBX_ASYNCEVT_ENABLE:
2781         case MBX_REG_VPI:
2782         case MBX_UNREG_VPI:
2783         case MBX_HEARTBEAT:
2784         case MBX_PORT_CAPABILITIES:
2785         case MBX_PORT_IOV_CONTROL:
2786         case MBX_SLI4_CONFIG:
2787         case MBX_SLI4_REQ_FTRS:
2788         case MBX_REG_FCFI:
2789         case MBX_UNREG_FCFI:
2790         case MBX_REG_VFI:
2791         case MBX_UNREG_VFI:
2792         case MBX_INIT_VPI:
2793         case MBX_INIT_VFI:
2794         case MBX_RESUME_RPI:
2795         case MBX_READ_EVENT_LOG_STATUS:
2796         case MBX_READ_EVENT_LOG:
2797         case MBX_SECURITY_MGMT:
2798         case MBX_AUTH_PORT:
2799         case MBX_ACCESS_VDATA:
2800                 ret = mbxCommand;
2801                 break;
2802         default:
2803                 ret = MBX_SHUTDOWN;
2804                 break;
2805         }
2806         return ret;
2807 }
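/*
 * The switch above is a pure whitelist: any completion code that is not
 * explicitly listed collapses to MBX_SHUTDOWN and takes the HBA offline.
 * A hedged standalone sketch of the same validate-or-sentinel pattern,
 * with hypothetical demo_* command codes standing in for the real ones
 * from lpfc_hw.h:
 */
#include <stdio.h>

enum { DEMO_CMD_READ = 0x01, DEMO_CMD_WRITE = 0x02, DEMO_SHUTDOWN = 0xFF };

/* Unknown codes can never be dispatched; they map to the sentinel. */
static int demo_chk_cmd(unsigned char cmd)
{
        switch (cmd) {
        case DEMO_CMD_READ:
        case DEMO_CMD_WRITE:
                return cmd;
        default:
                return DEMO_SHUTDOWN;
        }
}

int main(void)
{
        printf("0x%x\n", demo_chk_cmd(0x7E));   /* prints 0xff: unknown */
        return 0;
}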
2808
2809 /**
2810  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2811  * @phba: Pointer to HBA context object.
2812  * @pmboxq: Pointer to mailbox command.
2813  *
2814  * This is the completion handler for mailbox commands issued from the
2815  * lpfc_sli_issue_mbox_wait function. It is called by the mailbox event
2816  * handler function with no lock held. It wakes up the thread waiting
2817  * on the completion object pointed to by the context3 field of the
2818  * mailbox.
2819  **/
2820 void
2821 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2822 {
2823         unsigned long drvr_flag;
2824         struct completion *pmbox_done;
2825
2826         /*
2827          * If pmbox_done is NULL, the waiting thread gave up (e.g. timed
2828          * out) and has already continued running.
2829          */
2830         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2831         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2832         pmbox_done = (struct completion *)pmboxq->context3;
2833         if (pmbox_done)
2834                 complete(pmbox_done);
2835         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2836         return;
2837 }
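/*
 * The wake path above must read context3 under the hbalock because the
 * issuing thread may time out and clear the pointer concurrently, while
 * the LPFC_MBX_WAKE flag lets the waiter tell "completed" apart from
 * "timed out". A hedged userspace analog of that handshake, built on a
 * pthread condition variable (the demo names are illustrative only):
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_cond_t *done = &cond;    /* analog of pmboxq->context3 */
static bool wake;                       /* analog of LPFC_MBX_WAKE    */

static void *completer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        wake = true;                    /* publish completion first...   */
        if (done)                       /* ...then signal only if a      */
                pthread_cond_signal(done);      /* waiter is registered  */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, completer, NULL);
        pthread_mutex_lock(&lock);
        while (!wake)                   /* waiter blocks until woken */
                pthread_cond_wait(&cond, &lock);
        done = NULL;                    /* de-register, as a timeout would */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("mailbox completion observed");
        return 0;
}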
2838
2839 static void
2840 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2841 {
2842         unsigned long iflags;
2843
2844         if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2845                 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2846                 spin_lock_irqsave(&ndlp->lock, iflags);
2847                 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2848                 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2849                 spin_unlock_irqrestore(&ndlp->lock, iflags);
2850         }
2851         ndlp->nlp_flag &= ~NLP_UNREG_INP;
2852 }
2853
2854 void
2855 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2856 {
2857         __lpfc_sli_rpi_release(vport, ndlp);
2858 }
2859
2860 /**
2861  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2862  * @phba: Pointer to HBA context object.
2863  * @pmb: Pointer to mailbox object.
2864  *
2865  * This function is the default mailbox completion handler. It
2866  * frees the memory resources associated with the completed mailbox
2867  * command. If the completed command is a REG_LOGIN mailbox command,
2868  * this function will issue an UNREG_LOGIN to reclaim the RPI.
2869  **/
2870 void
2871 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2872 {
2873         struct lpfc_vport  *vport = pmb->vport;
2874         struct lpfc_dmabuf *mp;
2875         struct lpfc_nodelist *ndlp;
2876         struct Scsi_Host *shost;
2877         uint16_t rpi, vpi;
2878         int rc;
2879
2880         /*
2881          * If a REG_LOGIN succeeded after the node was destroyed or the
2882          * node is in re-discovery, the driver needs to clean up the RPI.
2883          */
2884         if (!(phba->pport->load_flag & FC_UNLOADING) &&
2885             pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2886             !pmb->u.mb.mbxStatus) {
2887                 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
2888                 if (mp) {
2889                         pmb->ctx_buf = NULL;
2890                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2891                         kfree(mp);
2892                 }
2893                 rpi = pmb->u.mb.un.varWords[0];
2894                 vpi = pmb->u.mb.un.varRegLogin.vpi;
2895                 if (phba->sli_rev == LPFC_SLI_REV4)
2896                         vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2897                 lpfc_unreg_login(phba, vpi, rpi, pmb);
2898                 pmb->vport = vport;
2899                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2900                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2901                 if (rc != MBX_NOT_FINISHED)
2902                         return;
2903         }
2904
2905         if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2906                 !(phba->pport->load_flag & FC_UNLOADING) &&
2907                 !pmb->u.mb.mbxStatus) {
2908                 shost = lpfc_shost_from_vport(vport);
2909                 spin_lock_irq(shost->host_lock);
2910                 vport->vpi_state |= LPFC_VPI_REGISTERED;
2911                 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2912                 spin_unlock_irq(shost->host_lock);
2913         }
2914
2915         if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2916                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2917                 lpfc_nlp_put(ndlp);
2918         }
2919
2920         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2921                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2922
2923                 /* Check to see if there are any deferred events to process */
2924                 if (ndlp) {
2925                         lpfc_printf_vlog(
2926                                 vport,
2927                                 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2928                                 "1438 UNREG cmpl deferred mbox x%x "
2929                                 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2930                                 ndlp->nlp_rpi, ndlp->nlp_DID,
2931                                 ndlp->nlp_flag, ndlp->nlp_defer_did,
2932                                 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2933
2934                         if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2935                             (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2936                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2937                                 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2938                                 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2939                         } else {
2940                                 __lpfc_sli_rpi_release(vport, ndlp);
2941                         }
2942
2943                         /* The unreg_login mailbox is complete and had a
2944                          * reference that has to be released.  The PLOGI
2945                          * got its own ref.
2946                          */
2947                         lpfc_nlp_put(ndlp);
2948                         pmb->ctx_ndlp = NULL;
2949                 }
2950         }
2951
2952         /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2953         if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2954                 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2955                 lpfc_nlp_put(ndlp);
2956         }
2957
2958         /* Check security permission status on INIT_LINK mailbox command */
2959         if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2960             (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2961                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2962                                 "2860 SLI authentication is required "
2963                                 "for INIT_LINK but has not been done yet\n");
2964
2965         if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2966                 lpfc_sli4_mbox_cmd_free(phba, pmb);
2967         else
2968                 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2969 }
2970 /**
2971  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2972  * @phba: Pointer to HBA context object.
2973  * @pmb: Pointer to mailbox object.
2974  *
2975  * This function is the unreg rpi mailbox completion handler. It
2976  * frees the memory resources associated with the completed mailbox
2977  * command. An additional reference is held on the ndlp to prevent
2978  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2979  * the unreg mailbox command completes; this routine drops that
2980  * reference.
2981  *
2982  **/
2983 void
2984 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2985 {
2986         struct lpfc_vport  *vport = pmb->vport;
2987         struct lpfc_nodelist *ndlp;
2988
2989         ndlp = pmb->ctx_ndlp;
2990         if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2991                 if (phba->sli_rev == LPFC_SLI_REV4 &&
2992                     (bf_get(lpfc_sli_intf_if_type,
2993                      &phba->sli4_hba.sli_intf) >=
2994                      LPFC_SLI_INTF_IF_TYPE_2)) {
2995                         if (ndlp) {
2996                                 lpfc_printf_vlog(
2997                                          vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2998                                          "0010 UNREG_LOGIN vpi:%x "
2999                                          "rpi:%x DID:%x defer x%x flg x%x "
3000                                          "x%px\n",
3001                                          vport->vpi, ndlp->nlp_rpi,
3002                                          ndlp->nlp_DID, ndlp->nlp_defer_did,
3003                                          ndlp->nlp_flag,
3004                                          ndlp);
3005                                 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3006
3007                                 /* Check to see if there are any deferred
3008                                  * events to process
3009                                  */
3010                                 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
3011                                     (ndlp->nlp_defer_did !=
3012                                     NLP_EVT_NOTHING_PENDING)) {
3013                                         lpfc_printf_vlog(
3014                                                 vport, KERN_INFO, LOG_DISCOVERY,
3015                                                 "4111 UNREG cmpl deferred "
3016                                                 "clr x%x on "
3017                                                 "NPort x%x Data: x%x x%px\n",
3018                                                 ndlp->nlp_rpi, ndlp->nlp_DID,
3019                                                 ndlp->nlp_defer_did, ndlp);
3020                                         ndlp->nlp_flag &= ~NLP_UNREG_INP;
3021                                         ndlp->nlp_defer_did =
3022                                                 NLP_EVT_NOTHING_PENDING;
3023                                         lpfc_issue_els_plogi(
3024                                                 vport, ndlp->nlp_DID, 0);
3025                                 } else {
3026                                         __lpfc_sli_rpi_release(vport, ndlp);
3027                                 }
3028                                 lpfc_nlp_put(ndlp);
3029                         }
3030                 }
3031         }
3032
3033         mempool_free(pmb, phba->mbox_mem_pool);
3034 }
3035
3036 /**
3037  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3038  * @phba: Pointer to HBA context object.
3039  *
3040  * This function is called with no lock held. It processes all the
3041  * completed mailbox commands and hands them to the upper layers. The
3042  * interrupt service routine processes the mailbox completion interrupt,
3043  * adds completed mailbox commands to the mboxq_cmpl queue, and signals
3044  * the worker thread. The worker thread then calls lpfc_sli_handle_mb_event,
3045  * which returns the completed mailbox commands in the mboxq_cmpl queue to
3046  * the upper layers by calling the completion handler function of each
3047  * mailbox.
3048  **/
3049 int
3050 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3051 {
3052         MAILBOX_t *pmbox;
3053         LPFC_MBOXQ_t *pmb;
3054         int rc;
3055         LIST_HEAD(cmplq);
3056
3057         phba->sli.slistat.mbox_event++;
3058
3059         /* Get all completed mailbox buffers into the cmplq */
3060         spin_lock_irq(&phba->hbalock);
3061         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3062         spin_unlock_irq(&phba->hbalock);
3063
3064         /* Get a Mailbox buffer to setup mailbox commands for callback */
3065         do {
3066                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3067                 if (pmb == NULL)
3068                         break;
3069
3070                 pmbox = &pmb->u.mb;
3071
3072                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3073                         if (pmb->vport) {
3074                                 lpfc_debugfs_disc_trc(pmb->vport,
3075                                         LPFC_DISC_TRC_MBOX_VPORT,
3076                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3077                                         (uint32_t)pmbox->mbxCommand,
3078                                         pmbox->un.varWords[0],
3079                                         pmbox->un.varWords[1]);
3080                         }
3081                         else {
3082                                 lpfc_debugfs_disc_trc(phba->pport,
3083                                         LPFC_DISC_TRC_MBOX,
3084                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
3085                                         (uint32_t)pmbox->mbxCommand,
3086                                         pmbox->un.varWords[0],
3087                                         pmbox->un.varWords[1]);
3088                         }
3089                 }
3090
3091                 /*
3092                  * It is a fatal error if an unknown mailbox command completes.
3093                  */
3094                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3095                     MBX_SHUTDOWN) {
3096                         /* Unknown mailbox command compl */
3097                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3098                                         "(%d):0323 Unknown Mailbox command "
3099                                         "x%x (x%x/x%x) Cmpl\n",
3100                                         pmb->vport ? pmb->vport->vpi :
3101                                         LPFC_VPORT_UNKNOWN,
3102                                         pmbox->mbxCommand,
3103                                         lpfc_sli_config_mbox_subsys_get(phba,
3104                                                                         pmb),
3105                                         lpfc_sli_config_mbox_opcode_get(phba,
3106                                                                         pmb));
3107                         phba->link_state = LPFC_HBA_ERROR;
3108                         phba->work_hs = HS_FFER3;
3109                         lpfc_handle_eratt(phba);
3110                         continue;
3111                 }
3112
3113                 if (pmbox->mbxStatus) {
3114                         phba->sli.slistat.mbox_stat_err++;
3115                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3116                                 /* Mbox cmd cmpl error - RETRYing */
3117                                 lpfc_printf_log(phba, KERN_INFO,
3118                                         LOG_MBOX | LOG_SLI,
3119                                         "(%d):0305 Mbox cmd cmpl "
3120                                         "error - RETRYing Data: x%x "
3121                                         "(x%x/x%x) x%x x%x x%x\n",
3122                                         pmb->vport ? pmb->vport->vpi :
3123                                         LPFC_VPORT_UNKNOWN,
3124                                         pmbox->mbxCommand,
3125                                         lpfc_sli_config_mbox_subsys_get(phba,
3126                                                                         pmb),
3127                                         lpfc_sli_config_mbox_opcode_get(phba,
3128                                                                         pmb),
3129                                         pmbox->mbxStatus,
3130                                         pmbox->un.varWords[0],
3131                                         pmb->vport ? pmb->vport->port_state :
3132                                         LPFC_VPORT_UNKNOWN);
3133                                 pmbox->mbxStatus = 0;
3134                                 pmbox->mbxOwner = OWN_HOST;
3135                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3136                                 if (rc != MBX_NOT_FINISHED)
3137                                         continue;
3138                         }
3139                 }
3140
3141                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3142                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3143                                 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3144                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3145                                 "x%x x%x x%x\n",
3146                                 pmb->vport ? pmb->vport->vpi : 0,
3147                                 pmbox->mbxCommand,
3148                                 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3149                                 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3150                                 pmb->mbox_cmpl,
3151                                 *((uint32_t *) pmbox),
3152                                 pmbox->un.varWords[0],
3153                                 pmbox->un.varWords[1],
3154                                 pmbox->un.varWords[2],
3155                                 pmbox->un.varWords[3],
3156                                 pmbox->un.varWords[4],
3157                                 pmbox->un.varWords[5],
3158                                 pmbox->un.varWords[6],
3159                                 pmbox->un.varWords[7],
3160                                 pmbox->un.varWords[8],
3161                                 pmbox->un.varWords[9],
3162                                 pmbox->un.varWords[10]);
3163
3164                 if (pmb->mbox_cmpl)
3165                         pmb->mbox_cmpl(phba, pmb);
3166         } while (1);
3167         return 0;
3168 }
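/*
 * The handler above uses the standard "splice under the lock, process
 * off the lock" shape: the shared mboxq_cmpl list is emptied onto a
 * private list in O(1) while hbalock is held, and the potentially slow
 * per-command completion callbacks then run with the lock dropped. A
 * hedged standalone sketch of that pattern with a toy singly-linked
 * list (demo_* names are illustrative):
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_node { int id; struct demo_node *next; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_node *demo_shared;   /* list producers append to */

static void demo_handle_events(void)
{
        struct demo_node *local, *n;

        pthread_mutex_lock(&demo_lock);         /* O(1) splice under lock */
        local = demo_shared;
        demo_shared = NULL;
        pthread_mutex_unlock(&demo_lock);

        while ((n = local)) {                   /* slow work off the lock */
                local = n->next;
                printf("completing %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct demo_node *n = malloc(sizeof(*n));

                n->id = i;
                n->next = demo_shared;
                demo_shared = n;
        }
        demo_handle_events();
        return 0;
}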
3169
3170 /**
3171  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3172  * @phba: Pointer to HBA context object.
3173  * @pring: Pointer to driver SLI ring object.
3174  * @tag: buffer tag.
3175  *
3176  * This function is called with no lock held. When the QUE_BUFTAG_BIT
3177  * is set in the tag, the buffer was posted for a particular exchange
3178  * and the function returns the buffer without replacing it.
3179  * If the buffer is for unsolicited ELS or CT traffic, this function
3180  * returns the buffer and also posts another buffer to the firmware.
3181  **/
3182 static struct lpfc_dmabuf *
3183 lpfc_sli_get_buff(struct lpfc_hba *phba,
3184                   struct lpfc_sli_ring *pring,
3185                   uint32_t tag)
3186 {
3187         struct hbq_dmabuf *hbq_entry;
3188
3189         if (tag & QUE_BUFTAG_BIT)
3190                 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3191         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3192         if (!hbq_entry)
3193                 return NULL;
3194         return &hbq_entry->dbuf;
3195 }
3196
3197 /**
3198  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3199  *                              containing a NVME LS request.
3200  * @phba: pointer to lpfc hba data structure.
3201  * @piocb: pointer to the iocbq struct representing the sequence starting
3202  *        frame.
3203  *
3204  * This routine first validates the NVME LS, verifies there is a login
3205  * with the port that sent the LS, and then calls the appropriate nvme host
3206  * or target LS request handler.
3207  **/
3208 static void
3209 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3210 {
3211         struct lpfc_nodelist *ndlp;
3212         struct lpfc_dmabuf *d_buf;
3213         struct hbq_dmabuf *nvmebuf;
3214         struct fc_frame_header *fc_hdr;
3215         struct lpfc_async_xchg_ctx *axchg = NULL;
3216         char *failwhy = NULL;
3217         uint32_t oxid, sid, did, fctl, size;
3218         int ret = 1;
3219
3220         d_buf = piocb->cmd_dmabuf;
3221
3222         nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3223         fc_hdr = nvmebuf->hbuf.virt;
3224         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3225         sid = sli4_sid_from_fc_hdr(fc_hdr);
3226         did = sli4_did_from_fc_hdr(fc_hdr);
3227         fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3228                 fc_hdr->fh_f_ctl[1] << 8 |
3229                 fc_hdr->fh_f_ctl[2]);
3230         size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3231
3232         lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3233                          oxid, size, sid);
3234
3235         if (phba->pport->load_flag & FC_UNLOADING) {
3236                 failwhy = "Driver Unloading";
3237         } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3238                 failwhy = "NVME FC4 Disabled";
3239         } else if (!phba->nvmet_support && !phba->pport->localport) {
3240                 failwhy = "No Localport";
3241         } else if (phba->nvmet_support && !phba->targetport) {
3242                 failwhy = "No Targetport";
3243         } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3244                 failwhy = "Bad NVME LS R_CTL";
3245         } else if (unlikely((fctl & 0x00FF0000) !=
3246                         (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3247                 failwhy = "Bad NVME LS F_CTL";
3248         } else {
3249                 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3250                 if (!axchg)
3251                         failwhy = "No CTX memory";
3252         }
3253
3254         if (unlikely(failwhy)) {
3255                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3256                                 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3257                                 sid, oxid, failwhy);
3258                 goto out_fail;
3259         }
3260
3261         /* validate the source of the LS is logged in */
3262         ndlp = lpfc_findnode_did(phba->pport, sid);
3263         if (!ndlp ||
3264             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3265              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3266                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3267                                 "6216 NVME Unsol rcv: No ndlp: "
3268                                 "NPort_ID x%x oxid x%x\n",
3269                                 sid, oxid);
3270                 goto out_fail;
3271         }
3272
3273         axchg->phba = phba;
3274         axchg->ndlp = ndlp;
3275         axchg->size = size;
3276         axchg->oxid = oxid;
3277         axchg->sid = sid;
3278         axchg->wqeq = NULL;
3279         axchg->state = LPFC_NVME_STE_LS_RCV;
3280         axchg->entry_cnt = 1;
3281         axchg->rqb_buffer = (void *)nvmebuf;
3282         axchg->hdwq = &phba->sli4_hba.hdwq[0];
3283         axchg->payload = nvmebuf->dbuf.virt;
3284         INIT_LIST_HEAD(&axchg->list);
3285
3286         if (phba->nvmet_support) {
3287                 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3288                 spin_lock_irq(&ndlp->lock);
3289                 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3290                         ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3291                         spin_unlock_irq(&ndlp->lock);
3292
3293                         /* This reference is taken once to keep the
3294                          * node valid until the nvmet transport calls
3295                          * host_release.
3296                          */
3297                         if (!lpfc_nlp_get(ndlp))
3298                                 goto out_fail;
3299
3300                         lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3301                                         "6206 NVMET unsol ls_req ndlp x%px "
3302                                         "DID x%x xflags x%x refcnt %d\n",
3303                                         ndlp, ndlp->nlp_DID,
3304                                         ndlp->fc4_xpt_flags,
3305                                         kref_read(&ndlp->kref));
3306                 } else {
3307                         spin_unlock_irq(&ndlp->lock);
3308                 }
3309         } else {
3310                 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3311         }
3312
3313         /* if zero, LS was successfully handled. If non-zero, LS not handled */
3314         if (!ret)
3315                 return;
3316
3317 out_fail:
3318         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3319                         "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3320                         "NVMe%s handler failed %d\n",
3321                         did, sid, oxid,
3322                         (phba->nvmet_support) ? "T" : "I", ret);
3323
3324         /* recycle receive buffer */
3325         lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3326
3327         /* If start of new exchange, abort it */
3328         if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3329                 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3330
3331         if (ret)
3332                 kfree(axchg);
3333 }
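/*
 * fh_f_ctl is a 3-byte big-endian field, so the handler assembles it
 * into a 24-bit value and requires FIRST_SEQ, END_SEQ and SEQ_INIT to
 * all be set -- i.e. the LS arrived as a complete single-sequence
 * frame with sequence initiative transferred. A hedged standalone
 * sketch of that check; the DEMO_* bit values mirror fc_fs.h:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FC_FIRST_SEQ       (1 << 21)       /* FC_FC_FIRST_SEQ */
#define DEMO_FC_END_SEQ         (1 << 19)       /* FC_FC_END_SEQ   */
#define DEMO_FC_SEQ_INIT        (1 << 16)       /* FC_FC_SEQ_INIT  */

static uint32_t demo_fctl(const uint8_t f_ctl[3])
{
        return (f_ctl[0] << 16) | (f_ctl[1] << 8) | f_ctl[2];
}

int main(void)
{
        /* 0x29 in the top byte = FIRST_SEQ | END_SEQ | SEQ_INIT */
        uint8_t hdr_fctl[3] = { 0x29, 0x00, 0x00 };
        uint32_t fctl = demo_fctl(hdr_fctl);
        uint32_t want = DEMO_FC_FIRST_SEQ | DEMO_FC_END_SEQ | DEMO_FC_SEQ_INIT;

        puts(((fctl & 0x00FF0000) == want) ? "accept" : "drop");
        return 0;
}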
3334
3335 /**
3336  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3337  * @phba: Pointer to HBA context object.
3338  * @pring: Pointer to driver SLI ring object.
3339  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3340  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3341  * @fch_type: the type for the first frame of the sequence.
3342  *
3343  * This function is called with no lock held. This function uses the r_ctl and
3344  * type of the received sequence to find the correct callback function to call
3345  * to process the sequence.
3346  **/
3347 static int
3348 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3349                          struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3350                          uint32_t fch_type)
3351 {
3352         int i;
3353
3354         switch (fch_type) {
3355         case FC_TYPE_NVME:
3356                 lpfc_nvme_unsol_ls_handler(phba, saveq);
3357                 return 1;
3358         default:
3359                 break;
3360         }
3361
3362         /* Unsolicited responses */
3363         if (pring->prt[0].profile) {
3364                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3365                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3366                                                                         saveq);
3367                 return 1;
3368         }
3369         /* We must search, based on rctl / type
3370            for the right routine */
3371         for (i = 0; i < pring->num_mask; i++) {
3372                 if ((pring->prt[i].rctl == fch_r_ctl) &&
3373                     (pring->prt[i].type == fch_type)) {
3374                         if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3375                                 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3376                                                 (phba, pring, saveq);
3377                         return 1;
3378                 }
3379         }
3380         return 0;
3381 }
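/*
 * The dispatch above is a linear search of a small (rctl, type) ->
 * handler table, with entry 0 optionally acting as a catch-all
 * "profile" handler. A hedged standalone sketch of that table-driven
 * dispatch; the demo_* names and the example rctl/type values are
 * illustrative only:
 */
#include <stdio.h>

typedef void (*demo_unsol_fn)(unsigned int rctl, unsigned int type);

struct demo_prt {
        unsigned int rctl, type;
        demo_unsol_fn handler;
};

static void demo_els(unsigned int r, unsigned int t)
{
        printf("ELS handler: rctl=%x type=%x\n", r, t);
}

static void demo_ct(unsigned int r, unsigned int t)
{
        printf("CT handler: rctl=%x type=%x\n", r, t);
}

static const struct demo_prt demo_table[] = {
        { 0x22, 0x01, demo_els },       /* ELS request */
        { 0x02, 0x20, demo_ct  },       /* CT request  */
};

/* Returns 1 if a handler consumed the frame, 0 otherwise. */
static int demo_dispatch(unsigned int rctl, unsigned int type)
{
        for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++) {
                if (demo_table[i].rctl == rctl && demo_table[i].type == type) {
                        demo_table[i].handler(rctl, type);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        if (!demo_dispatch(0x22, 0x01))
                puts("unexpected rctl/type");
        return 0;
}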
3382
3383 static void
3384 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3385                         struct lpfc_iocbq *saveq)
3386 {
3387         IOCB_t *irsp;
3388         union lpfc_wqe128 *wqe;
3389         u16 i = 0;
3390
3391         irsp = &saveq->iocb;
3392         wqe = &saveq->wqe;
3393
3394         /* Fill wcqe with the IOCB status fields */
3395         bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3396         saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3397         saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3398         saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3399
3400         /* Source ID */
3401         bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3402
3403         /* rx-id of the response frame */
3404         bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3405
3406         /* ox-id of the frame */
3407         bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3408                irsp->unsli3.rcvsli3.ox_id);
3409
3410         /* DID */
3411         bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3412                irsp->un.rcvels.remoteID);
3413
3414         /* unsol data len */
3415         for (i = 0; i < irsp->ulpBdeCount; i++) {
3416                 struct lpfc_hbq_entry *hbqe = NULL;
3417
3418                 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3419                         if (i == 0) {
3420                                 hbqe = (struct lpfc_hbq_entry *)
3421                                         &irsp->un.ulpWord[0];
3422                                 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3423                                         hbqe->bde.tus.f.bdeSize;
3424                         } else if (i == 1) {
3425                                 hbqe = (struct lpfc_hbq_entry *)
3426                                         &irsp->unsli3.sli3Words[4];
3427                                 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3428                         }
3429                 }
3430         }
3431 }
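/*
 * The bf_set()/bf_get() accessors used above are lpfc's shift-and-mask
 * helpers: each field name expands to _SHIFT/_MASK (and a word index)
 * constants describing where the field lives inside the WQE words. A
 * hedged standalone sketch of the same idea, simplified to a single
 * 32-bit word -- the demo_* macros echo the style, not lpfc_hw4.h
 * verbatim:
 */
#include <assert.h>
#include <stdint.h>

#define demo_ctxt_tag_SHIFT     0
#define demo_ctxt_tag_MASK      0x0000FFFFu
#define demo_oxid_SHIFT         16
#define demo_oxid_MASK          0x0000FFFFu

#define DEMO_BF_SET(name, wp, val)                                     \
        (*(wp) = (*(wp) & ~(name##_MASK << name##_SHIFT)) |            \
                 (((uint32_t)(val) & name##_MASK) << name##_SHIFT))

#define DEMO_BF_GET(name, wp)                                          \
        ((*(wp) >> name##_SHIFT) & name##_MASK)

int main(void)
{
        uint32_t word = 0;

        DEMO_BF_SET(demo_ctxt_tag, &word, 0x1234);      /* rx-id, low half  */
        DEMO_BF_SET(demo_oxid, &word, 0xBEEF);          /* ox-id, high half */
        assert(DEMO_BF_GET(demo_ctxt_tag, &word) == 0x1234);
        assert(DEMO_BF_GET(demo_oxid, &word) == 0xBEEF);
        return 0;
}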
3432
3433 /**
3434  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3435  * @phba: Pointer to HBA context object.
3436  * @pring: Pointer to driver SLI ring object.
3437  * @saveq: Pointer to the unsolicited iocb.
3438  *
3439  * This function is called with no lock held by the ring event handler
3440  * when there is an unsolicited iocb posted to the response ring by the
3441  * firmware. This function gets the buffer associated with the iocbs
3442  * and calls the event handler for the ring. This function handles both
3443  * qring buffers and hbq buffers.
3444  * When the function returns 1, the caller can free the iocb object;
3445  * otherwise, upper-layer functions will free the iocb objects.
3446  **/
3447 static int
3448 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3449                             struct lpfc_iocbq *saveq)
3450 {
3451         IOCB_t *irsp;
3452         WORD5 *w5p;
3453         dma_addr_t paddr;
3454         uint32_t Rctl, Type;
3455         struct lpfc_iocbq *iocbq;
3456         struct lpfc_dmabuf *dmzbuf;
3457
3458         irsp = &saveq->iocb;
3459         saveq->vport = phba->pport;
3460
3461         if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3462                 if (pring->lpfc_sli_rcv_async_status)
3463                         pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3464                 else
3465                         lpfc_printf_log(phba,
3466                                         KERN_WARNING,
3467                                         LOG_SLI,
3468                                         "0316 Ring %d handler: unexpected "
3469                                         "ASYNC_STATUS iocb received evt_code "
3470                                         "0x%x\n",
3471                                         pring->ringno,
3472                                         irsp->un.asyncstat.evt_code);
3473                 return 1;
3474         }
3475
3476         if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3477             (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3478                 if (irsp->ulpBdeCount > 0) {
3479                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3480                                                    irsp->un.ulpWord[3]);
3481                         lpfc_in_buf_free(phba, dmzbuf);
3482                 }
3483
3484                 if (irsp->ulpBdeCount > 1) {
3485                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3486                                                    irsp->unsli3.sli3Words[3]);
3487                         lpfc_in_buf_free(phba, dmzbuf);
3488                 }
3489
3490                 if (irsp->ulpBdeCount > 2) {
3491                         dmzbuf = lpfc_sli_get_buff(phba, pring,
3492                                                    irsp->unsli3.sli3Words[7]);
3493                         lpfc_in_buf_free(phba, dmzbuf);
3494                 }
3495
3496                 return 1;
3497         }
3498
3499         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3500                 if (irsp->ulpBdeCount != 0) {
3501                         saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3502                                                 irsp->un.ulpWord[3]);
3503                         if (!saveq->cmd_dmabuf)
3504                                 lpfc_printf_log(phba,
3505                                         KERN_ERR,
3506                                         LOG_SLI,
3507                                         "0341 Ring %d Cannot find buffer for "
3508                                         "an unsolicited iocb. tag 0x%x\n",
3509                                         pring->ringno,
3510                                         irsp->un.ulpWord[3]);
3511                 }
3512                 if (irsp->ulpBdeCount == 2) {
3513                         saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3514                                                 irsp->unsli3.sli3Words[7]);
3515                         if (!saveq->bpl_dmabuf)
3516                                 lpfc_printf_log(phba,
3517                                         KERN_ERR,
3518                                         LOG_SLI,
3519                                         "0342 Ring %d Cannot find buffer for an"
3520                                         " unsolicited iocb. tag 0x%x\n",
3521                                         pring->ringno,
3522                                         irsp->unsli3.sli3Words[7]);
3523                 }
3524                 list_for_each_entry(iocbq, &saveq->list, list) {
3525                         irsp = &iocbq->iocb;
3526                         if (irsp->ulpBdeCount != 0) {
3527                                 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3528                                                         pring,
3529                                                         irsp->un.ulpWord[3]);
3530                                 if (!iocbq->cmd_dmabuf)
3531                                         lpfc_printf_log(phba,
3532                                                 KERN_ERR,
3533                                                 LOG_SLI,
3534                                                 "0343 Ring %d Cannot find "
3535                                                 "buffer for an unsolicited iocb"
3536                                                 ". tag 0x%x\n", pring->ringno,
3537                                                 irsp->un.ulpWord[3]);
3538                         }
3539                         if (irsp->ulpBdeCount == 2) {
3540                                 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3541                                                 pring,
3542                                                 irsp->unsli3.sli3Words[7]);
3543                                 if (!iocbq->bpl_dmabuf)
3544                                         lpfc_printf_log(phba,
3545                                                 KERN_ERR,
3546                                                 LOG_SLI,
3547                                                 "0344 Ring %d Cannot find "
3548                                                 "buffer for an unsolicited "
3549                                                 "iocb. tag 0x%x\n",
3550                                                 pring->ringno,
3551                                                 irsp->unsli3.sli3Words[7]);
3552                         }
3553                 }
3554         } else {
3555                 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3556                                  irsp->un.cont64[0].addrLow);
3557                 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3558                                                              paddr);
3559                 if (irsp->ulpBdeCount == 2) {
3560                         paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3561                                          irsp->un.cont64[1].addrLow);
3562                         saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3563                                                                    pring,
3564                                                                    paddr);
3565                 }
3566         }
3567
3568         if (irsp->ulpBdeCount != 0 &&
3569             (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3570              irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3571                 int found = 0;
3572
3573                 /* search the continue saveq for a frame with the same OX_ID */
3574                 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3575                         if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3576                                 saveq->iocb.unsli3.rcvsli3.ox_id) {
3577                                 list_add_tail(&saveq->list, &iocbq->list);
3578                                 found = 1;
3579                                 break;
3580                         }
3581                 }
3582                 if (!found)
3583                         list_add_tail(&saveq->clist,
3584                                       &pring->iocb_continue_saveq);
3585
3586                 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3587                         list_del_init(&iocbq->clist);
3588                         saveq = iocbq;
3589                         irsp = &saveq->iocb;
3590                 } else {
3591                         return 0;
3592                 }
3593         }
3594         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3595             (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3596             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3597                 Rctl = FC_RCTL_ELS_REQ;
3598                 Type = FC_TYPE_ELS;
3599         } else {
3600                 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3601                 Rctl = w5p->hcsw.Rctl;
3602                 Type = w5p->hcsw.Type;
3603
3604                 /* Firmware Workaround */
3605                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3606                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3607                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3608                         Rctl = FC_RCTL_ELS_REQ;
3609                         Type = FC_TYPE_ELS;
3610                         w5p->hcsw.Rctl = Rctl;
3611                         w5p->hcsw.Type = Type;
3612                 }
3613         }
3614
3615         if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3616             (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3617             irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3618                 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3619                         saveq->vport = phba->pport;
3620                 else
3621                         saveq->vport = lpfc_find_vport_by_vpid(phba,
3622                                                irsp->unsli3.rcvsli3.vpi);
3623         }
3624
3625         /* Prepare WQE with Unsol frame */
3626         lpfc_sli_prep_unsol_wqe(phba, saveq);
3627
3628         if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3629                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3630                                 "0313 Ring %d handler: unexpected Rctl x%x "
3631                                 "Type x%x received\n",
3632                                 pring->ringno, Rctl, Type);
3633
3634         return 1;
3635 }
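/*
 * Multi-frame sequences arrive as IOSTAT_INTERMED_RSP pieces; the
 * handler above parks them on iocb_continue_saveq keyed by OX_ID and
 * dispatches only once the final (non-intermediate) frame of the same
 * exchange arrives. A hedged standalone sketch of that
 * accumulate-until-final pattern (demo_* types are illustrative and
 * the fixed-size table stands in for the driver's list):
 */
#include <stdio.h>

#define DEMO_MAX_SEQS 8

struct demo_seq { unsigned short ox_id; int nfrags; };

static struct demo_seq demo_saveq[DEMO_MAX_SEQS];
static int demo_nseq;

/* Returns the completed sequence when `final` closes it, else NULL. */
static struct demo_seq *demo_rcv_frag(unsigned short ox_id, int final)
{
        struct demo_seq *s = NULL;

        for (int i = 0; i < demo_nseq; i++)
                if (demo_saveq[i].ox_id == ox_id)
                        s = &demo_saveq[i];
        if (!s) {                               /* first frame of sequence */
                s = &demo_saveq[demo_nseq++];
                s->ox_id = ox_id;
                s->nfrags = 0;
        }
        s->nfrags++;
        return final ? s : NULL;                /* dispatch on last frame */
}

int main(void)
{
        struct demo_seq *s;

        demo_rcv_frag(0x10, 0);                 /* intermediate */
        demo_rcv_frag(0x10, 0);                 /* intermediate */
        s = demo_rcv_frag(0x10, 1);             /* final frame  */
        if (s)
                printf("OX_ID 0x%x complete in %d frames\n",
                       s->ox_id, s->nfrags);
        return 0;
}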
3636
3637 /**
3638  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3639  * @phba: Pointer to HBA context object.
3640  * @pring: Pointer to driver SLI ring object.
3641  * @prspiocb: Pointer to response iocb object.
3642  *
3643  * This function looks up the iocb_lookup table to get the command iocb
3644  * corresponding to the given response iocb using the iotag of the
3645  * response iocb. The driver calls this function with the hbalock held
3646  * for SLI3 ports or the ring lock held for SLI4 ports.
3647  * This function returns the command iocb object if it finds the command
3648  * iocb else returns NULL.
3649  **/
3650 static struct lpfc_iocbq *
3651 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3652                       struct lpfc_sli_ring *pring,
3653                       struct lpfc_iocbq *prspiocb)
3654 {
3655         struct lpfc_iocbq *cmd_iocb = NULL;
3656         u16 iotag;
3657
3658         if (phba->sli_rev == LPFC_SLI_REV4)
3659                 iotag = get_wqe_reqtag(prspiocb);
3660         else
3661                 iotag = prspiocb->iocb.ulpIoTag;
3662
3663         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3664                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3665                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3666                         /* remove from txcmpl queue list */
3667                         list_del_init(&cmd_iocb->list);
3668                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3669                         pring->txcmplq_cnt--;
3670                         return cmd_iocb;
3671                 }
3672         }
3673
3674         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3675                         "0317 iotag x%x is out of "
3676                         "range: max iotag x%x\n",
3677                         iotag, phba->sli.last_iotag);
3678         return NULL;
3679 }
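/*
 * Command/response matching is O(1): the iotag carried in the response
 * indexes directly into the iocbq_lookup array that was filled at
 * submit time, and the ON_TXCMPLQ flag guards against stale or
 * duplicate completions. A hedged standalone sketch of that scheme
 * (demo_* names are illustrative; locking and tag recycling elided):
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_iocb { unsigned short iotag; int on_txcmplq; };

#define DEMO_MAX_IOTAG 64
static struct demo_iocb *demo_lookup[DEMO_MAX_IOTAG + 1];
static unsigned short demo_last_iotag;

static unsigned short demo_submit(struct demo_iocb *cmd)
{
        cmd->iotag = ++demo_last_iotag;         /* 0 is never a valid tag */
        cmd->on_txcmplq = 1;
        demo_lookup[cmd->iotag] = cmd;
        return cmd->iotag;
}

static struct demo_iocb *demo_complete(unsigned short iotag)
{
        struct demo_iocb *cmd;

        if (iotag == 0 || iotag > demo_last_iotag)
                return NULL;                    /* out-of-range response */
        cmd = demo_lookup[iotag];
        if (!cmd || !cmd->on_txcmplq)
                return NULL;                    /* stale or duplicate cmpl */
        cmd->on_txcmplq = 0;
        return cmd;
}

int main(void)
{
        struct demo_iocb *cmd = calloc(1, sizeof(*cmd));
        unsigned short tag = demo_submit(cmd);

        printf("matched: %d\n", demo_complete(tag) == cmd);     /* 1 */
        free(cmd);
        return 0;
}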
3680
3681 /**
3682  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3683  * @phba: Pointer to HBA context object.
3684  * @pring: Pointer to driver SLI ring object.
3685  * @iotag: IOCB tag.
3686  *
3687  * This function looks up the iocb_lookup table to get the command iocb
3688  * corresponding to the given iotag. The driver calls this function with
3689  * the ring lock held because this function is an SLI4-only helper.
3690  * This function returns the command iocb object if it finds the command
3691  * iocb else returns NULL.
3692  **/
3693 static struct lpfc_iocbq *
3694 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3695                              struct lpfc_sli_ring *pring, uint16_t iotag)
3696 {
3697         struct lpfc_iocbq *cmd_iocb = NULL;
3698
3699         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3700                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3701                 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3702                         /* remove from txcmpl queue list */
3703                         list_del_init(&cmd_iocb->list);
3704                         cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3705                         pring->txcmplq_cnt--;
3706                         return cmd_iocb;
3707                 }
3708         }
3709
3710         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3711                         "0372 iotag x%x lookup error: max iotag (x%x) "
3712                         "cmd_flag x%x\n",
3713                         iotag, phba->sli.last_iotag,
3714                         cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3715         return NULL;
3716 }
3717
3718 /**
3719  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3720  * @phba: Pointer to HBA context object.
3721  * @pring: Pointer to driver SLI ring object.
3722  * @saveq: Pointer to the response iocb to be processed.
3723  *
3724  * This function is called by the ring event handler for non-fcp
3725  * rings when there is a new response iocb in the response ring.
3726  * The caller is not required to hold any locks. This function
3727  * gets the command iocb associated with the response iocb and
3728  * calls the completion handler for the command iocb. If there
3729  * is no completion handler, the function will free the resources
3730  * associated with command iocb. If the response iocb is for
3731  * an already aborted command iocb, the status of the completion
3732  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3733  * This function always returns 1.
3734  **/
3735 static int
3736 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3737                           struct lpfc_iocbq *saveq)
3738 {
3739         struct lpfc_iocbq *cmdiocbp;
3740         unsigned long iflag;
3741         u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3742
3743         if (phba->sli_rev == LPFC_SLI_REV4)
3744                 spin_lock_irqsave(&pring->ring_lock, iflag);
3745         else
3746                 spin_lock_irqsave(&phba->hbalock, iflag);
3747         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3748         if (phba->sli_rev == LPFC_SLI_REV4)
3749                 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3750         else
3751                 spin_unlock_irqrestore(&phba->hbalock, iflag);
3752
3753         ulp_command = get_job_cmnd(phba, saveq);
3754         ulp_status = get_job_ulpstatus(phba, saveq);
3755         ulp_word4 = get_job_word4(phba, saveq);
3756         ulp_context = get_job_ulpcontext(phba, saveq);
3757         if (phba->sli_rev == LPFC_SLI_REV4)
3758                 iotag = get_wqe_reqtag(saveq);
3759         else
3760                 iotag = saveq->iocb.ulpIoTag;
3761
3762         if (cmdiocbp) {
3763                 ulp_command = get_job_cmnd(phba, cmdiocbp);
3764                 if (cmdiocbp->cmd_cmpl) {
3765                         /*
3766                          * If an ELS command failed, send an event to the
3767                          * management application.
3768                          */
3769                         if (ulp_status &&
3770                              (pring->ringno == LPFC_ELS_RING) &&
3771                              (ulp_command == CMD_ELS_REQUEST64_CR))
3772                                 lpfc_send_els_failure_event(phba,
3773                                         cmdiocbp, saveq);
3774
3775                         /*
3776                          * Post all ELS completions to the worker thread.
3777                          * All others are passed to the completion callback.
3778                          */
3779                         if (pring->ringno == LPFC_ELS_RING) {
3780                                 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3781                                     (cmdiocbp->cmd_flag &
3782                                                         LPFC_DRIVER_ABORTED)) {
3783                                         spin_lock_irqsave(&phba->hbalock,
3784                                                           iflag);
3785                                         cmdiocbp->cmd_flag &=
3786                                                 ~LPFC_DRIVER_ABORTED;
3787                                         spin_unlock_irqrestore(&phba->hbalock,
3788                                                                iflag);
3789                                         saveq->iocb.ulpStatus =
3790                                                 IOSTAT_LOCAL_REJECT;
3791                                         saveq->iocb.un.ulpWord[4] =
3792                                                 IOERR_SLI_ABORTED;
3793
3794                                         /* Firmware could still be in the process
3795                                          * of DMAing the payload, so don't free the
3796                                          * data buffer until after a heartbeat.
3797                                          */
3798                                         spin_lock_irqsave(&phba->hbalock,
3799                                                           iflag);
3800                                         saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3801                                         spin_unlock_irqrestore(&phba->hbalock,
3802                                                                iflag);
3803                                 }
3804                                 if (phba->sli_rev == LPFC_SLI_REV4) {
3805                                         if (saveq->cmd_flag &
3806                                             LPFC_EXCHANGE_BUSY) {
3807                                                 /* Set cmdiocb flag for the
3808                                                  * exchange busy so sgl (xri)
3809                                                  * will not be released until
3810                                                  * the abort xri is received
3811                                                  * from hba.
3812                                                  */
3813                                                 spin_lock_irqsave(
3814                                                         &phba->hbalock, iflag);
3815                                                 cmdiocbp->cmd_flag |=
3816                                                         LPFC_EXCHANGE_BUSY;
3817                                                 spin_unlock_irqrestore(
3818                                                         &phba->hbalock, iflag);
3819                                         }
3820                                         if (cmdiocbp->cmd_flag &
3821                                             LPFC_DRIVER_ABORTED) {
3822                                                 /*
3823                                                  * Clear LPFC_DRIVER_ABORTED
3824                                                  * bit in case it was driver
3825                                                  * initiated abort.
3826                                                  */
3827                                                 spin_lock_irqsave(
3828                                                         &phba->hbalock, iflag);
3829                                                 cmdiocbp->cmd_flag &=
3830                                                         ~LPFC_DRIVER_ABORTED;
3831                                                 spin_unlock_irqrestore(
3832                                                         &phba->hbalock, iflag);
3833                                                 set_job_ulpstatus(cmdiocbp,
3834                                                                   IOSTAT_LOCAL_REJECT);
3835                                                 set_job_ulpword4(cmdiocbp,
3836                                                                  IOERR_ABORT_REQUESTED);
3837                                                 /*
3838                                                  * For SLI4, irspiocb contains
3839                                                  * NO_XRI in sli_xritag, it
3840                                                  * shall not affect releasing
3841                                                  * sgl (xri) process.
3842                                                  */
3843                                                 set_job_ulpstatus(saveq,
3844                                                                   IOSTAT_LOCAL_REJECT);
3845                                                 set_job_ulpword4(saveq,
3846                                                                  IOERR_SLI_ABORTED);
3847                                                 spin_lock_irqsave(
3848                                                         &phba->hbalock, iflag);
3849                                                 saveq->cmd_flag |=
3850                                                         LPFC_DELAY_MEM_FREE;
3851                                                 spin_unlock_irqrestore(
3852                                                         &phba->hbalock, iflag);
3853                                         }
3854                                 }
3855                         }
3856                         cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3857                 } else
3858                         lpfc_sli_release_iocbq(phba, cmdiocbp);
3859         } else {
3860                 /*
3861                  * Unknown initiating command based on the response iotag.
3862                  * This could be the case on the ELS ring because of
3863                  * lpfc_els_abort().
3864                  */
3865                 if (pring->ringno != LPFC_ELS_RING) {
3866                         /*
3867                          * Ring <ringno> handler: unexpected completion IoTag
3868                          * <IoTag>
3869                          */
3870                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3871                                          "0322 Ring %d handler: "
3872                                          "unexpected completion IoTag x%x "
3873                                          "Data: x%x x%x x%x x%x\n",
3874                                          pring->ringno, iotag, ulp_status,
3875                                          ulp_word4, ulp_command, ulp_context);
3876                 }
3877         }
3878
3879         return 1;
3880 }
3881
3882 /**
3883  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3884  * @phba: Pointer to HBA context object.
3885  * @pring: Pointer to driver SLI ring object.
3886  *
3887  * This function is called from the iocb ring event handlers when
3888  * the put pointer is ahead of the get pointer for a ring. It signals
3889  * an error attention condition to the worker thread, and the worker
3890  * thread will transition the HBA to the offline state.
3891  **/
3892 static void
3893 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3894 {
3895         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3896         /*
3897          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3898          * rsp ring <portRspMax>
3899          */
3900         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3901                         "0312 Ring %d handler: portRspPut %d "
3902                         "is bigger than rsp ring %d\n",
3903                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
3904                         pring->sli.sli3.numRiocb);
3905
3906         phba->link_state = LPFC_HBA_ERROR;
3907
3908         /*
3909          * All error attention handlers are posted to
3910          * worker thread
3911          */
3912         phba->work_ha |= HA_ERATT;
3913         phba->work_hs = HS_FFER3;
3914
3915         lpfc_worker_wake_up(phba);
3916
3917         return;
3918 }
3919
3920 /**
3921  * lpfc_poll_eratt - Error attention polling timer timeout handler
3922  * @t: Timer context used to derive the pointer to the HBA context object.
3923  *
3924  * This function is invoked by the Error Attention polling timer when the
3925  * timer times out. It will check the SLI Error Attention register for
3926  * possible attention events. If so, it will post an Error Attention event
3927  * and wake up worker thread to process it. Otherwise, it will set up the
3928  * Error Attention polling timer for the next poll.
3929  **/
3930 void lpfc_poll_eratt(struct timer_list *t)
3931 {
3932         struct lpfc_hba *phba;
3933         uint32_t eratt = 0;
3934         uint64_t sli_intr, cnt;
3935
3936         phba = from_timer(phba, t, eratt_poll);
3937
3938         /* Here we also keep track of interrupts per second for this HBA */
3939         sli_intr = phba->sli.slistat.sli_intr;
3940
3941         if (phba->sli.slistat.sli_prev_intr > sli_intr)
3942                 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3943                         sli_intr);
3944         else
3945                 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3946
3947         /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3948         do_div(cnt, phba->eratt_poll_interval);
3949         phba->sli.slistat.sli_ips = cnt;
3950
3951         phba->sli.slistat.sli_prev_intr = sli_intr;
3952
3953         /* Check chip HA register for error event */
3954         eratt = lpfc_sli_check_eratt(phba);
3955
3956         if (eratt)
3957                 /* Tell the worker thread there is work to do */
3958                 lpfc_worker_wake_up(phba);
3959         else
3960                 /* Restart the timer for next eratt poll */
3961                 mod_timer(&phba->eratt_poll,
3962                           jiffies +
3963                           msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3964         return;
3965 }
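
/*
 * Illustrative sketch only (assumed helper, not driver code): the
 * wraparound-safe rate computation lpfc_poll_eratt() performs above,
 * reduced to its core.  do_div() divides the u64 in place, so the
 * math also works on 32-bit targets.
 */
static inline u64 example_intr_per_sec(u64 prev, u64 curr, u32 interval)
{
        u64 cnt;

        /* Account for the counter wrapping past the 64-bit maximum */
        if (prev > curr)
                cnt = ((u64)(-1) - prev) + curr;
        else
                cnt = curr - prev;

        do_div(cnt, interval);  /* cnt now holds interrupts per second */
        return cnt;
}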
3966
3967
3968 /**
3969  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3970  * @phba: Pointer to HBA context object.
3971  * @pring: Pointer to driver SLI ring object.
3972  * @mask: Host attention register mask for this ring.
3973  *
3974  * This function is called from the interrupt context when there is a ring
3975  * event for the fcp ring. The caller does not hold any lock.
3976  * The function processes each response iocb in the response ring until it
3977  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3978  * LE bit set. The function will call the completion handler of the command iocb
3979  * if the response iocb indicates a completion for a command iocb or it is
3980  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3981  * function if this is an unsolicited iocb.
3982  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3983  * to check it explicitly.
3984  **/
3985 int
3986 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3987                                 struct lpfc_sli_ring *pring, uint32_t mask)
3988 {
3989         struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3990         IOCB_t *irsp = NULL;
3991         IOCB_t *entry = NULL;
3992         struct lpfc_iocbq *cmdiocbq = NULL;
3993         struct lpfc_iocbq rspiocbq;
3994         uint32_t status;
3995         uint32_t portRspPut, portRspMax;
3996         int rc = 1;
3997         lpfc_iocb_type type;
3998         unsigned long iflag;
3999         uint32_t rsp_cmpl = 0;
4000
4001         spin_lock_irqsave(&phba->hbalock, iflag);
4002         pring->stats.iocb_event++;
4003
4004         /*
4005          * The next available response entry should never exceed the maximum
4006          * entries.  If it does, treat it as an adapter hardware error.
4007          */
4008         portRspMax = pring->sli.sli3.numRiocb;
4009         portRspPut = le32_to_cpu(pgp->rspPutInx);
4010         if (unlikely(portRspPut >= portRspMax)) {
4011                 lpfc_sli_rsp_pointers_error(phba, pring);
4012                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4013                 return 1;
4014         }
4015         if (phba->fcp_ring_in_use) {
4016                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4017                 return 1;
4018         } else
4019                 phba->fcp_ring_in_use = 1;
4020
4021         rmb();
4022         while (pring->sli.sli3.rspidx != portRspPut) {
4023                 /*
4024                  * Fetch an entry off the ring and copy it into a local data
4025                  * structure.  The copy involves a byte-swap since the
4026                  * network and PCI byte orders are different.
4027                  */
4028                 entry = lpfc_resp_iocb(phba, pring);
4029                 phba->last_completion_time = jiffies;
4030
4031                 if (++pring->sli.sli3.rspidx >= portRspMax)
4032                         pring->sli.sli3.rspidx = 0;
4033
4034                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4035                                       (uint32_t *) &rspiocbq.iocb,
4036                                       phba->iocb_rsp_size);
4037                 INIT_LIST_HEAD(&(rspiocbq.list));
4038                 irsp = &rspiocbq.iocb;
4039
4040                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4041                 pring->stats.iocb_rsp++;
4042                 rsp_cmpl++;
4043
4044                 if (unlikely(irsp->ulpStatus)) {
4045                         /*
4046                          * If resource errors reported from HBA, reduce
4047                          * queuedepths of the SCSI device.
4048                          */
4049                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4050                             ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4051                              IOERR_NO_RESOURCES)) {
4052                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4053                                 phba->lpfc_rampdown_queue_depth(phba);
4054                                 spin_lock_irqsave(&phba->hbalock, iflag);
4055                         }
4056
4057                         /* Rsp ring <ringno> error: IOCB */
4058                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4059                                         "0336 Rsp Ring %d error: IOCB Data: "
4060                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4061                                         pring->ringno,
4062                                         irsp->un.ulpWord[0],
4063                                         irsp->un.ulpWord[1],
4064                                         irsp->un.ulpWord[2],
4065                                         irsp->un.ulpWord[3],
4066                                         irsp->un.ulpWord[4],
4067                                         irsp->un.ulpWord[5],
4068                                         *(uint32_t *)&irsp->un1,
4069                                         *((uint32_t *)&irsp->un1 + 1));
4070                 }
4071
4072                 switch (type) {
4073                 case LPFC_ABORT_IOCB:
4074                 case LPFC_SOL_IOCB:
4075                         /*
4076                          * Idle exchange closed via ABTS from port.  No iocb
4077                          * resources need to be recovered.
4078                          */
4079                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4080                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4081                                                 "0333 IOCB cmd 0x%x"
4082                                                 " processed. Skipping"
4083                                                 " completion\n",
4084                                                 irsp->ulpCommand);
4085                                 break;
4086                         }
4087
4088                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4089                                                          &rspiocbq);
4090                         if (unlikely(!cmdiocbq))
4091                                 break;
4092                         if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4093                                 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4094                         if (cmdiocbq->cmd_cmpl) {
4095                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4096                                 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4097                                 spin_lock_irqsave(&phba->hbalock, iflag);
4098                         }
4099                         break;
4100                 case LPFC_UNSOL_IOCB:
4101                         spin_unlock_irqrestore(&phba->hbalock, iflag);
4102                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4103                         spin_lock_irqsave(&phba->hbalock, iflag);
4104                         break;
4105                 default:
4106                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4107                                 char adaptermsg[LPFC_MAX_ADPTMSG];
4108                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4109                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4110                                        MAX_MSG_DATA);
4111                                 dev_warn(&((phba->pcidev)->dev),
4112                                          "lpfc%d: %s\n",
4113                                          phba->brd_no, adaptermsg);
4114                         } else {
4115                                 /* Unknown IOCB command */
4116                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4117                                                 "0334 Unknown IOCB command "
4118                                                 "Data: x%x, x%x x%x x%x x%x\n",
4119                                                 type, irsp->ulpCommand,
4120                                                 irsp->ulpStatus,
4121                                                 irsp->ulpIoTag,
4122                                                 irsp->ulpContext);
4123                         }
4124                         break;
4125                 }
4126
4127                 /*
4128                  * The response IOCB has been processed.  Update the ring
4129                  * pointer in SLIM.  If the port response put pointer has not
4130                  * been updated, sync the pgp->rspPutInx and fetch the new port
4131                  * response put pointer.
4132                  */
4133                 writel(pring->sli.sli3.rspidx,
4134                         &phba->host_gp[pring->ringno].rspGetInx);
4135
4136                 if (pring->sli.sli3.rspidx == portRspPut)
4137                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4138         }
4139
4140         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4141                 pring->stats.iocb_rsp_full++;
4142                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4143                 writel(status, phba->CAregaddr);
4144                 readl(phba->CAregaddr);
4145         }
4146         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4147                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4148                 pring->stats.iocb_cmd_empty++;
4149
4150                 /* Force update of the local copy of cmdGetInx */
4151                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4152                 lpfc_sli_resume_iocb(phba, pring);
4153
4154                 if ((pring->lpfc_sli_cmd_available))
4155                         (pring->lpfc_sli_cmd_available) (phba, pring);
4156
4157         }
4158
4159         phba->fcp_ring_in_use = 0;
4160         spin_unlock_irqrestore(&phba->hbalock, iflag);
4161         return rc;
4162 }
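
/*
 * Illustrative sketch only (names local to this example): the get/put
 * index discipline the fast-ring handler above follows.  The port
 * advances the put index as it posts responses; the host consumes
 * entries until its get index catches up, wrapping at the end of the
 * circular ring, then publishes the new get index so slots can be
 * reused.
 */
static inline void example_consume_ring(u32 *getidx, u32 putidx, u32 max)
{
        while (*getidx != putidx) {
                /* ... copy out and process the entry at *getidx ... */
                if (++(*getidx) >= max)
                        *getidx = 0;    /* wrap around the ring */
        }
}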
4163
4164 /**
4165  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4166  * @phba: Pointer to HBA context object.
4167  * @pring: Pointer to driver SLI ring object.
4168  * @rspiocbp: Pointer to driver response IOCB object.
4169  *
4170  * This function is called from the worker thread when there is a slow-path
4171  * response IOCB to process. This function chains all the response iocbs until
4172  * seeing the iocb with the LE bit set. The function will call
4173  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4174  * completion of a command iocb. The function will call the
4175  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4176  * The function frees the resources or calls the completion handler if this
4177  * iocb is an abort completion. The function returns NULL when the response
4178  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4179  * this function shall chain the iocb on to the iocb_continueq and return the
4180  * response iocb passed in.
4181  **/
4182 static struct lpfc_iocbq *
4183 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4184                         struct lpfc_iocbq *rspiocbp)
4185 {
4186         struct lpfc_iocbq *saveq;
4187         struct lpfc_iocbq *cmdiocb;
4188         struct lpfc_iocbq *next_iocb;
4189         IOCB_t *irsp;
4190         uint32_t free_saveq;
4191         u8 cmd_type;
4192         lpfc_iocb_type type;
4193         unsigned long iflag;
4194         u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4195         u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4196         u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4197         int rc;
4198
4199         spin_lock_irqsave(&phba->hbalock, iflag);
4200         /* First add the response iocb to the iocb_continueq list */
4201         list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4202         pring->iocb_continueq_cnt++;
4203
4204         /*
4205          * By default, the driver expects to free all resources
4206          * associated with this iocb completion.
4207          */
4208         free_saveq = 1;
4209         saveq = list_get_first(&pring->iocb_continueq,
4210                                struct lpfc_iocbq, list);
4211         list_del_init(&pring->iocb_continueq);
4212         pring->iocb_continueq_cnt = 0;
4213
4214         pring->stats.iocb_rsp++;
4215
4216         /*
4217          * If resource errors reported from HBA, reduce
4218          * queuedepths of the SCSI device.
4219          */
4220         if (ulp_status == IOSTAT_LOCAL_REJECT &&
4221             ((ulp_word4 & IOERR_PARAM_MASK) ==
4222              IOERR_NO_RESOURCES)) {
4223                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4224                 phba->lpfc_rampdown_queue_depth(phba);
4225                 spin_lock_irqsave(&phba->hbalock, iflag);
4226         }
4227
4228         if (ulp_status) {
4229                 /* Rsp ring <ringno> error: IOCB */
4230                 if (phba->sli_rev < LPFC_SLI_REV4) {
4231                         irsp = &rspiocbp->iocb;
4232                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4233                                         "0328 Rsp Ring %d error: ulp_status x%x "
4234                                         "IOCB Data: "
4235                                         "x%08x x%08x x%08x x%08x "
4236                                         "x%08x x%08x x%08x x%08x "
4237                                         "x%08x x%08x x%08x x%08x "
4238                                         "x%08x x%08x x%08x x%08x\n",
4239                                         pring->ringno, ulp_status,
4240                                         get_job_ulpword(rspiocbp, 0),
4241                                         get_job_ulpword(rspiocbp, 1),
4242                                         get_job_ulpword(rspiocbp, 2),
4243                                         get_job_ulpword(rspiocbp, 3),
4244                                         get_job_ulpword(rspiocbp, 4),
4245                                         get_job_ulpword(rspiocbp, 5),
4246                                         *(((uint32_t *)irsp) + 6),
4247                                         *(((uint32_t *)irsp) + 7),
4248                                         *(((uint32_t *)irsp) + 8),
4249                                         *(((uint32_t *)irsp) + 9),
4250                                         *(((uint32_t *)irsp) + 10),
4251                                         *(((uint32_t *)irsp) + 11),
4252                                         *(((uint32_t *)irsp) + 12),
4253                                         *(((uint32_t *)irsp) + 13),
4254                                         *(((uint32_t *)irsp) + 14),
4255                                         *(((uint32_t *)irsp) + 15));
4256                 } else {
4257                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4258                                         "0321 Rsp Ring %d error: "
4259                                         "IOCB Data: "
4260                                         "x%x x%x x%x x%x\n",
4261                                         pring->ringno,
4262                                         rspiocbp->wcqe_cmpl.word0,
4263                                         rspiocbp->wcqe_cmpl.total_data_placed,
4264                                         rspiocbp->wcqe_cmpl.parameter,
4265                                         rspiocbp->wcqe_cmpl.word3);
4266                 }
4267         }
4268
4269
4270         /*
4271          * Fetch the iocb command type and call the correct completion
4272          * routine. Solicited and Unsolicited IOCBs on the ELS ring
4273          * get freed back to the lpfc_iocb_list by the discovery
4274          * kernel thread.
4275          */
4276         cmd_type = ulp_command & CMD_IOCB_MASK;
4277         type = lpfc_sli_iocb_cmd_type(cmd_type);
4278         switch (type) {
4279         case LPFC_SOL_IOCB:
4280                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4281                 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4282                 spin_lock_irqsave(&phba->hbalock, iflag);
4283                 break;
4284         case LPFC_UNSOL_IOCB:
4285                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4286                 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4287                 spin_lock_irqsave(&phba->hbalock, iflag);
4288                 if (!rc)
4289                         free_saveq = 0;
4290                 break;
4291         case LPFC_ABORT_IOCB:
4292                 cmdiocb = NULL;
4293                 if (ulp_command != CMD_XRI_ABORTED_CX)
4294                         cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4295                                                         saveq);
4296                 if (cmdiocb) {
4297                         /* Call the specified completion routine */
4298                         if (cmdiocb->cmd_cmpl) {
4299                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4300                                 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4301                                 spin_lock_irqsave(&phba->hbalock, iflag);
4302                         } else {
4303                                 __lpfc_sli_release_iocbq(phba, cmdiocb);
4304                         }
4305                 }
4306                 break;
4307         case LPFC_UNKNOWN_IOCB:
4308                 if (ulp_command == CMD_ADAPTER_MSG) {
4309                         char adaptermsg[LPFC_MAX_ADPTMSG];
4310
4311                         memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4312                         memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4313                                MAX_MSG_DATA);
4314                         dev_warn(&((phba->pcidev)->dev),
4315                                  "lpfc%d: %s\n",
4316                                  phba->brd_no, adaptermsg);
4317                 } else {
4318                         /* Unknown command */
4319                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4320                                         "0335 Unknown IOCB "
4321                                         "command Data: x%x "
4322                                         "x%x x%x x%x\n",
4323                                         ulp_command,
4324                                         ulp_status,
4325                                         get_wqe_reqtag(rspiocbp),
4326                                         get_job_ulpcontext(phba, rspiocbp));
4327                 }
4328                 break;
4329         }
4330
4331         if (free_saveq) {
4332                 list_for_each_entry_safe(rspiocbp, next_iocb,
4333                                          &saveq->list, list) {
4334                         list_del_init(&rspiocbp->list);
4335                         __lpfc_sli_release_iocbq(phba, rspiocbp);
4336                 }
4337                 __lpfc_sli_release_iocbq(phba, saveq);
4338         }
4339         rspiocbp = NULL;
4340         spin_unlock_irqrestore(&phba->hbalock, iflag);
4341         return rspiocbp;
4342 }
4343
4344 /**
4345  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4346  * @phba: Pointer to HBA context object.
4347  * @pring: Pointer to driver SLI ring object.
4348  * @mask: Host attention register mask for this ring.
4349  *
4350  * This routine wraps the actual slow_ring event process routine from the
4351  * API jump table function pointer from the lpfc_hba struct.
4352  **/
4353 void
4354 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4355                                 struct lpfc_sli_ring *pring, uint32_t mask)
4356 {
4357         phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4358 }
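
/*
 * Illustrative sketch only: how the per-revision jump table behind the
 * wrapper above is presumably filled in at attach time (a simplified
 * assumption, not the driver's exact init code).  Callers then remain
 * revision-agnostic.
 */
#if 0
        if (phba->sli_rev < LPFC_SLI_REV4)
                phba->lpfc_sli_handle_slow_ring_event =
                                lpfc_sli_handle_slow_ring_event_s3;
        else
                phba->lpfc_sli_handle_slow_ring_event =
                                lpfc_sli_handle_slow_ring_event_s4;
#endif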
4359
4360 /**
4361  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4362  * @phba: Pointer to HBA context object.
4363  * @pring: Pointer to driver SLI ring object.
4364  * @mask: Host attention register mask for this ring.
4365  *
4366  * This function is called from the worker thread when there is a ring event
4367  * for non-fcp rings. The caller does not hold any lock. The function will
4368  * remove each response iocb in the response ring and calls the handle
4369  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4370  **/
4371 static void
4372 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4373                                    struct lpfc_sli_ring *pring, uint32_t mask)
4374 {
4375         struct lpfc_pgp *pgp;
4376         IOCB_t *entry;
4377         IOCB_t *irsp = NULL;
4378         struct lpfc_iocbq *rspiocbp = NULL;
4379         uint32_t portRspPut, portRspMax;
4380         unsigned long iflag;
4381         uint32_t status;
4382
4383         pgp = &phba->port_gp[pring->ringno];
4384         spin_lock_irqsave(&phba->hbalock, iflag);
4385         pring->stats.iocb_event++;
4386
4387         /*
4388          * The next available response entry should never exceed the maximum
4389          * entries.  If it does, treat it as an adapter hardware error.
4390          */
4391         portRspMax = pring->sli.sli3.numRiocb;
4392         portRspPut = le32_to_cpu(pgp->rspPutInx);
4393         if (portRspPut >= portRspMax) {
4394                 /*
4395                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4396                  * rsp ring <portRspMax>
4397                  */
4398                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4399                                 "0303 Ring %d handler: portRspPut %d "
4400                                 "is bigger than rsp ring %d\n",
4401                                 pring->ringno, portRspPut, portRspMax);
4402
4403                 phba->link_state = LPFC_HBA_ERROR;
4404                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4405
4406                 phba->work_hs = HS_FFER3;
4407                 lpfc_handle_eratt(phba);
4408
4409                 return;
4410         }
4411
4412         rmb();
4413         while (pring->sli.sli3.rspidx != portRspPut) {
4414                 /*
4415                  * Build a completion list and call the appropriate handler.
4416                  * The process is to get the next available response iocb, get
4417                  * a free iocb from the list, copy the response data into the
4418                  * free iocb, insert it into the continuation list, and update the
4419                  * next response index in SLIM.  This process makes response
4420                  * iocbs in the ring available to DMA as fast as possible but
4421                  * pays a penalty for a copy operation.  Since the iocb is
4422                  * only 32 bytes, this penalty is considered small relative to
4423                  * the PCI reads for register values and a SLIM write.  When
4424                  * the ulpLe field is set, the entire Command has been
4425                  * received.
4426                  */
4427                 entry = lpfc_resp_iocb(phba, pring);
4428
4429                 phba->last_completion_time = jiffies;
4430                 rspiocbp = __lpfc_sli_get_iocbq(phba);
4431                 if (rspiocbp == NULL) {
4432                         printk(KERN_ERR "%s: out of buffers! Failing "
4433                                "completion.\n", __func__);
4434                         break;
4435                 }
4436
4437                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4438                                       phba->iocb_rsp_size);
4439                 irsp = &rspiocbp->iocb;
4440
4441                 if (++pring->sli.sli3.rspidx >= portRspMax)
4442                         pring->sli.sli3.rspidx = 0;
4443
4444                 if (pring->ringno == LPFC_ELS_RING) {
4445                         lpfc_debugfs_slow_ring_trc(phba,
4446                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4447                                 *(((uint32_t *) irsp) + 4),
4448                                 *(((uint32_t *) irsp) + 6),
4449                                 *(((uint32_t *) irsp) + 7));
4450                 }
4451
4452                 writel(pring->sli.sli3.rspidx,
4453                         &phba->host_gp[pring->ringno].rspGetInx);
4454
4455                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4456                 /* Handle the response IOCB */
4457                 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4458                 spin_lock_irqsave(&phba->hbalock, iflag);
4459
4460                 /*
4461                  * If the port response put pointer has not been updated, sync
4462          * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4463                  * response put pointer.
4464                  */
4465                 if (pring->sli.sli3.rspidx == portRspPut) {
4466                         portRspPut = le32_to_cpu(pgp->rspPutInx);
4467                 }
4468         } /* while (pring->sli.sli3.rspidx != portRspPut) */
4469
4470         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4471                 /* At least one response entry has been freed */
4472                 pring->stats.iocb_rsp_full++;
4473                 /* SET RxRE_RSP in Chip Att register */
4474                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4475                 writel(status, phba->CAregaddr);
4476                 readl(phba->CAregaddr); /* flush */
4477         }
4478         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4479                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4480                 pring->stats.iocb_cmd_empty++;
4481
4482                 /* Force update of the local copy of cmdGetInx */
4483                 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4484                 lpfc_sli_resume_iocb(phba, pring);
4485
4486                 if ((pring->lpfc_sli_cmd_available))
4487                         (pring->lpfc_sli_cmd_available) (phba, pring);
4488
4489         }
4490
4491         spin_unlock_irqrestore(&phba->hbalock, iflag);
4492         return;
4493 }
4494
4495 /**
4496  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4497  * @phba: Pointer to HBA context object.
4498  * @pring: Pointer to driver SLI ring object.
4499  * @mask: Host attention register mask for this ring.
4500  *
4501  * This function is called from the worker thread when there is a pending
4502  * ELS response iocb on the driver internal slow-path response iocb worker
4503  * queue. The caller does not hold any lock. The function will remove each
4504  * response iocb from the response worker queue and calls the handle
4505  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4506  **/
4507 static void
4508 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4509                                    struct lpfc_sli_ring *pring, uint32_t mask)
4510 {
4511         struct lpfc_iocbq *irspiocbq;
4512         struct hbq_dmabuf *dmabuf;
4513         struct lpfc_cq_event *cq_event;
4514         unsigned long iflag;
4515         int count = 0;
4516
4517         spin_lock_irqsave(&phba->hbalock, iflag);
4518         phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4519         spin_unlock_irqrestore(&phba->hbalock, iflag);
4520         while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4521                 /* Get the response iocb from the head of work queue */
4522                 spin_lock_irqsave(&phba->hbalock, iflag);
4523                 list_remove_head(&phba->sli4_hba.sp_queue_event,
4524                                  cq_event, struct lpfc_cq_event, list);
4525                 spin_unlock_irqrestore(&phba->hbalock, iflag);
4526
4527                 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4528                 case CQE_CODE_COMPL_WQE:
4529                         irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4530                                                  cq_event);
4531                         /* Translate ELS WCQE to response IOCBQ */
4532                         irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4533                                                                       irspiocbq);
4534                         if (irspiocbq)
4535                                 lpfc_sli_sp_handle_rspiocb(phba, pring,
4536                                                            irspiocbq);
4537                         count++;
4538                         break;
4539                 case CQE_CODE_RECEIVE:
4540                 case CQE_CODE_RECEIVE_V1:
4541                         dmabuf = container_of(cq_event, struct hbq_dmabuf,
4542                                               cq_event);
4543                         lpfc_sli4_handle_received_buffer(phba, dmabuf);
4544                         count++;
4545                         break;
4546                 default:
4547                         break;
4548                 }
4549
4550                 /* Limit the number of events to 64 to avoid soft lockups */
4551                 if (count == 64)
4552                         break;
4553         }
4554 }
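
/*
 * Illustrative sketch only (names local to this example): the bounded
 * drain used above.  At most a fixed budget of queued events is handled
 * per invocation so a busy queue cannot monopolize the CPU; leftovers
 * are picked up on the next worker wakeup.
 */
static inline int example_drain_budget(struct list_head *q, int budget)
{
        struct list_head *entry;
        int done = 0;

        while (!list_empty(q) && done < budget) {
                entry = q->next;
                list_del_init(entry);   /* detach before handling */
                /* ... dispatch the entry to its handler ... */
                done++;
        }
        return done;    /* done == budget hints that work remains */
}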
4555
4556 /**
4557  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4558  * @phba: Pointer to HBA context object.
4559  * @pring: Pointer to driver SLI ring object.
4560  *
4561  * This function aborts all iocbs in the given ring and frees all the iocb
4562  * objects in txq. This function issues an abort iocb for all the iocb commands
4563  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4564  * the return of this function. The caller is not required to hold any locks.
4565  **/
4566 void
4567 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4568 {
4569         LIST_HEAD(tx_completions);
4570         LIST_HEAD(txcmplq_completions);
4571         struct lpfc_iocbq *iocb, *next_iocb;
4572         int offline;
4573
4574         if (pring->ringno == LPFC_ELS_RING) {
4575                 lpfc_fabric_abort_hba(phba);
4576         }
4577         offline = pci_channel_offline(phba->pcidev);
4578
4579         /* Error everything on txq and txcmplq
4580          * First do the txq.
4581          */
4582         if (phba->sli_rev >= LPFC_SLI_REV4) {
4583                 spin_lock_irq(&pring->ring_lock);
4584                 list_splice_init(&pring->txq, &tx_completions);
4585                 pring->txq_cnt = 0;
4586
4587                 if (offline) {
4588                         list_splice_init(&pring->txcmplq,
4589                                          &txcmplq_completions);
4590                 } else {
4591                         /* Next issue ABTS for everything on the txcmplq */
4592                         list_for_each_entry_safe(iocb, next_iocb,
4593                                                  &pring->txcmplq, list)
4594                                 lpfc_sli_issue_abort_iotag(phba, pring,
4595                                                            iocb, NULL);
4596                 }
4597                 spin_unlock_irq(&pring->ring_lock);
4598         } else {
4599                 spin_lock_irq(&phba->hbalock);
4600                 list_splice_init(&pring->txq, &tx_completions);
4601                 pring->txq_cnt = 0;
4602
4603                 if (offline) {
4604                         list_splice_init(&pring->txcmplq, &txcmplq_completions);
4605                 } else {
4606                         /* Next issue ABTS for everything on the txcmplq */
4607                         list_for_each_entry_safe(iocb, next_iocb,
4608                                                  &pring->txcmplq, list)
4609                                 lpfc_sli_issue_abort_iotag(phba, pring,
4610                                                            iocb, NULL);
4611                 }
4612                 spin_unlock_irq(&phba->hbalock);
4613         }
4614
4615         if (offline) {
4616                 /* Cancel all the IOCBs from the completions list */
4617                 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4618                                       IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4619         } else {
4620                 /* Make sure HBA is alive */
4621                 lpfc_issue_hb_tmo(phba);
4622         }
4623         /* Cancel all the IOCBs from the completions list */
4624         lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4625                               IOERR_SLI_ABORTED);
4626 }
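
/*
 * Illustrative sketch only (names local to this example): the
 * splice-under-lock pattern the abort path above relies on.  Entries
 * are detached onto a private list while the lock is held, then failed
 * outside the lock so per-entry completion work does not nest under it.
 */
static inline void example_splice_then_cancel(spinlock_t *lock,
                                              struct list_head *src,
                                              struct list_head *done)
{
        spin_lock_irq(lock);
        list_splice_init(src, done);    /* detach everything atomically */
        spin_unlock_irq(lock);

        /* ... walk 'done' and complete each entry with an error status ... */
}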
4627
4628 /**
4629  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4630  * @phba: Pointer to HBA context object.
4631  *
4632  * This function aborts all iocbs in FCP rings and frees all the iocb
4633  * objects in txq. This function issues an abort iocb for all the iocb commands
4634  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4635  * the return of this function. The caller is not required to hold any locks.
4636  **/
4637 void
4638 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4639 {
4640         struct lpfc_sli *psli = &phba->sli;
4641         struct lpfc_sli_ring  *pring;
4642         uint32_t i;
4643
4644         /* Look on all the FCP Rings for the iotag */
4645         if (phba->sli_rev >= LPFC_SLI_REV4) {
4646                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4647                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4648                         lpfc_sli_abort_iocb_ring(phba, pring);
4649                 }
4650         } else {
4651                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4652                 lpfc_sli_abort_iocb_ring(phba, pring);
4653         }
4654 }
4655
4656 /**
4657  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4658  * @phba: Pointer to HBA context object.
4659  *
4660  * This function flushes all iocbs in the IO ring and frees all the iocb
4661  * objects in txq and txcmplq. This function will not issue abort iocbs
4662  * for the iocb commands in txcmplq; they will just be returned with
4663  * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
4664  * slot has been permanently disabled.
4665  **/
4666 void
4667 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4668 {
4669         LIST_HEAD(txq);
4670         LIST_HEAD(txcmplq);
4671         struct lpfc_sli *psli = &phba->sli;
4672         struct lpfc_sli_ring  *pring;
4673         uint32_t i;
4674         struct lpfc_iocbq *piocb, *next_iocb;
4675
4676         spin_lock_irq(&phba->hbalock);
4677         /* Indicate the I/O queues are flushed */
4678         phba->hba_flag |= HBA_IOQ_FLUSH;
4679         spin_unlock_irq(&phba->hbalock);
4680
4681         /* Look on all the FCP Rings for the iotag */
4682         if (phba->sli_rev >= LPFC_SLI_REV4) {
4683                 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4684                         pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4685
4686                         spin_lock_irq(&pring->ring_lock);
4687                         /* Retrieve everything on txq */
4688                         list_splice_init(&pring->txq, &txq);
4689                         list_for_each_entry_safe(piocb, next_iocb,
4690                                                  &pring->txcmplq, list)
4691                                 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4692                         /* Retrieve everything on the txcmplq */
4693                         list_splice_init(&pring->txcmplq, &txcmplq);
4694                         pring->txq_cnt = 0;
4695                         pring->txcmplq_cnt = 0;
4696                         spin_unlock_irq(&pring->ring_lock);
4697
4698                         /* Flush the txq */
4699                         lpfc_sli_cancel_iocbs(phba, &txq,
4700                                               IOSTAT_LOCAL_REJECT,
4701                                               IOERR_SLI_DOWN);
4702                         /* Flush the txcmplq */
4703                         lpfc_sli_cancel_iocbs(phba, &txcmplq,
4704                                               IOSTAT_LOCAL_REJECT,
4705                                               IOERR_SLI_DOWN);
4706                         if (unlikely(pci_channel_offline(phba->pcidev)))
4707                                 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4708                 }
4709         } else {
4710                 pring = &psli->sli3_ring[LPFC_FCP_RING];
4711
4712                 spin_lock_irq(&phba->hbalock);
4713                 /* Retrieve everything on txq */
4714                 list_splice_init(&pring->txq, &txq);
4715                 list_for_each_entry_safe(piocb, next_iocb,
4716                                          &pring->txcmplq, list)
4717                         piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4718                 /* Retrieve everything on the txcmplq */
4719                 list_splice_init(&pring->txcmplq, &txcmplq);
4720                 pring->txq_cnt = 0;
4721                 pring->txcmplq_cnt = 0;
4722                 spin_unlock_irq(&phba->hbalock);
4723
4724                 /* Flush the txq */
4725                 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4726                                       IOERR_SLI_DOWN);
4727                 /* Flush the txcmplq */
4728                 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4729                                       IOERR_SLI_DOWN);
4730         }
4731 }
4732
4733 /**
4734  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4735  * @phba: Pointer to HBA context object.
4736  * @mask: Bit mask to be checked.
4737  *
4738  * This function reads the host status register and compares it
4739  * with the provided bit mask to check if the HBA completed
4740  * the restart. It will wait in a loop for the
4741  * HBA to complete the restart. If the HBA does not restart within
4742  * 15 iterations, the function will reset the HBA again. The
4743  * function returns 1 when the HBA fails to restart, otherwise it
4744  * returns zero.
4745  **/
4746 static int
4747 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4748 {
4749         uint32_t status;
4750         int i = 0;
4751         int retval = 0;
4752
4753         /* Read the HBA Host Status Register */
4754         if (lpfc_readl(phba->HSregaddr, &status))
4755                 return 1;
4756
4757         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4758
4759         /*
4760          * Check the status register every 10ms for 5 retries, then every
4761          * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4762          * check every 2.5 sec for 4 more.
4763          * Break out of the loop if errors occurred during init.
4764          */
4765         while (((status & mask) != mask) &&
4766                !(status & HS_FFERM) &&
4767                i++ < 20) {
4768
4769                 if (i <= 5)
4770                         msleep(10);
4771                 else if (i <= 10)
4772                         msleep(500);
4773                 else
4774                         msleep(2500);
4775
4776                 if (i == 15) {
4777                         /* Do post */
4778                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4779                         lpfc_sli_brdrestart(phba);
4780                 }
4781                 /* Read the HBA Host Status Register */
4782                 if (lpfc_readl(phba->HSregaddr, &status)) {
4783                         retval = 1;
4784                         break;
4785                 }
4786         }
4787
4788         /* Check to see if any errors occurred during init */
4789         if ((status & HS_FFERM) || (i >= 20)) {
4790                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4791                                 "2751 Adapter failed to restart, "
4792                                 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4793                                 status,
4794                                 readl(phba->MBslimaddr + 0xa8),
4795                                 readl(phba->MBslimaddr + 0xac));
4796                 phba->link_state = LPFC_HBA_ERROR;
4797                 retval = 1;
4798         }
4799
4800         return retval;
4801 }
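
/*
 * Illustrative sketch only (assumed helper): the escalating poll
 * interval lpfc_sli_brdready_s3() applies above, with quick checks
 * early on and progressively longer sleeps as the retry count grows.
 */
static inline void example_ready_backoff(int attempt)
{
        if (attempt <= 5)
                msleep(10);     /* fast polls first */
        else if (attempt <= 10)
                msleep(500);    /* then back off */
        else
                msleep(2500);   /* long waits for the final retries */
}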
4802
4803 /**
4804  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4805  * @phba: Pointer to HBA context object.
4806  * @mask: Bit mask to be checked.
4807  *
4808  * This function checks the host status register to determine if the
4809  * HBA is ready. It will wait in a loop for the HBA to become ready.
4810  * If the HBA is not ready, the function will reset the HBA PCI
4811  * function again. The function returns 1 when the HBA fails to become
4812  * ready, otherwise it returns zero.
4813  **/
4814 static int
4815 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4816 {
4817         uint32_t status;
4818         int retval = 0;
4819
4820         /* Read the HBA Host Status Register */
4821         status = lpfc_sli4_post_status_check(phba);
4822
4823         if (status) {
4824                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4825                 lpfc_sli_brdrestart(phba);
4826                 status = lpfc_sli4_post_status_check(phba);
4827         }
4828
4829         /* Check to see if any errors occurred during init */
4830         if (status) {
4831                 phba->link_state = LPFC_HBA_ERROR;
4832                 retval = 1;
4833         } else
4834                 phba->sli4_hba.intr_enable = 0;
4835
4836         phba->hba_flag &= ~HBA_SETUP;
4837         return retval;
4838 }
4839
4840 /**
4841  * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
4842  * @phba: Pointer to HBA context object.
4843  * @mask: Bit mask to be checked.
4844  *
4845  * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine
4846  * from the API jump table function pointer from the lpfc_hba struct.
4847  **/
4848 int
4849 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4850 {
4851         return phba->lpfc_sli_brdready(phba, mask);
4852 }
4853
4854 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4855
4856 /**
4857  * lpfc_reset_barrier - Make HBA ready for HBA reset
4858  * @phba: Pointer to HBA context object.
4859  *
4860  * This function is called before resetting an HBA. It is called with
4861  * the hbalock held and requests the HBA to quiesce DMAs before a reset.
4862  **/
4863 void lpfc_reset_barrier(struct lpfc_hba *phba)
4864 {
4865         uint32_t __iomem *resp_buf;
4866         uint32_t __iomem *mbox_buf;
4867         volatile struct MAILBOX_word0 mbox;
4868         uint32_t hc_copy, ha_copy, resp_data;
4869         int  i;
4870         uint8_t hdrtype;
4871
4872         lockdep_assert_held(&phba->hbalock);
4873
4874         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4875         if (hdrtype != 0x80 ||
4876             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4877              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4878                 return;
4879
4880         /*
4881          * Tell the other part of the chip to suspend temporarily all
4882          * its DMA activity.
4883          */
4884         resp_buf = phba->MBslimaddr;
4885
4886         /* Disable the error attention */
4887         if (lpfc_readl(phba->HCregaddr, &hc_copy))
4888                 return;
4889         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4890         readl(phba->HCregaddr); /* flush */
4891         phba->link_flag |= LS_IGNORE_ERATT;
4892
4893         if (lpfc_readl(phba->HAregaddr, &ha_copy))
4894                 return;
4895         if (ha_copy & HA_ERATT) {
4896                 /* Clear Chip error bit */
4897                 writel(HA_ERATT, phba->HAregaddr);
4898                 phba->pport->stopped = 1;
4899         }
4900
4901         mbox.word0 = 0;
4902         mbox.mbxCommand = MBX_KILL_BOARD;
4903         mbox.mbxOwner = OWN_CHIP;
4904
4905         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4906         mbox_buf = phba->MBslimaddr;
4907         writel(mbox.word0, mbox_buf);
4908
4909         for (i = 0; i < 50; i++) {
4910                 if (lpfc_readl((resp_buf + 1), &resp_data))
4911                         return;
4912                 if (resp_data != ~(BARRIER_TEST_PATTERN))
4913                         mdelay(1);
4914                 else
4915                         break;
4916         }
4917         resp_data = 0;
4918         if (lpfc_readl((resp_buf + 1), &resp_data))
4919                 return;
4920         if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4921                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4922                     phba->pport->stopped)
4923                         goto restore_hc;
4924                 else
4925                         goto clear_errat;
4926         }
4927
4928         mbox.mbxOwner = OWN_HOST;
4929         resp_data = 0;
4930         for (i = 0; i < 500; i++) {
4931                 if (lpfc_readl(resp_buf, &resp_data))
4932                         return;
4933                 if (resp_data != mbox.word0)
4934                         mdelay(1);
4935                 else
4936                         break;
4937         }
4938
4939 clear_errat:
4940
4941         while (++i < 500) {
4942                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4943                         return;
4944                 if (!(ha_copy & HA_ERATT))
4945                         mdelay(1);
4946                 else
4947                         break;
4948         }
4949
4950         if (readl(phba->HAregaddr) & HA_ERATT) {
4951                 writel(HA_ERATT, phba->HAregaddr);
4952                 phba->pport->stopped = 1;
4953         }
4954
4955 restore_hc:
4956         phba->link_flag &= ~LS_IGNORE_ERATT;
4957         writel(hc_copy, phba->HCregaddr);
4958         readl(phba->HCregaddr); /* flush */
4959 }
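
/*
 * Illustrative sketch only (assumed helper): the bounded register poll
 * the barrier code above open-codes while waiting for the test pattern
 * or mailbox ownership to change.  Newer code might use
 * readl_poll_timeout() from <linux/iopoll.h> instead.
 */
static inline bool example_poll_reg(u32 __iomem *reg, u32 want, int tries)
{
        while (tries--) {
                if (readl(reg) == want)
                        return true;
                mdelay(1);      /* busy-wait: callers may hold hbalock */
        }
        return false;
}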
4960
4961 /**
4962  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4963  * @phba: Pointer to HBA context object.
4964  *
4965  * This function issues a kill_board mailbox command and waits for
4966  * the error attention interrupt. This function is called for stopping
4967  * the firmware processing. The caller is not required to hold any
4968  * locks. This function calls the lpfc_hba_down_post function to free
4969  * any pending commands after the kill. The function will return 1 when it
4970  * fails to kill the board, otherwise it will return 0.
4971  **/
4972 int
4973 lpfc_sli_brdkill(struct lpfc_hba *phba)
4974 {
4975         struct lpfc_sli *psli;
4976         LPFC_MBOXQ_t *pmb;
4977         uint32_t status;
4978         uint32_t ha_copy;
4979         int retval;
4980         int i = 0;
4981
4982         psli = &phba->sli;
4983
4984         /* Kill HBA */
4985         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4986                         "0329 Kill HBA Data: x%x x%x\n",
4987                         phba->pport->port_state, psli->sli_flag);
4988
4989         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4990         if (!pmb)
4991                 return 1;
4992
4993         /* Disable the error attention */
4994         spin_lock_irq(&phba->hbalock);
4995         if (lpfc_readl(phba->HCregaddr, &status)) {
4996                 spin_unlock_irq(&phba->hbalock);
4997                 mempool_free(pmb, phba->mbox_mem_pool);
4998                 return 1;
4999         }
5000         status &= ~HC_ERINT_ENA;
5001         writel(status, phba->HCregaddr);
5002         readl(phba->HCregaddr); /* flush */
5003         phba->link_flag |= LS_IGNORE_ERATT;
5004         spin_unlock_irq(&phba->hbalock);
5005
5006         lpfc_kill_board(phba, pmb);
5007         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5008         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5009
5010         if (retval != MBX_SUCCESS) {
5011                 if (retval != MBX_BUSY)
5012                         mempool_free(pmb, phba->mbox_mem_pool);
5013                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5014                                 "2752 KILL_BOARD command failed retval %d\n",
5015                                 retval);
5016                 spin_lock_irq(&phba->hbalock);
5017                 phba->link_flag &= ~LS_IGNORE_ERATT;
5018                 spin_unlock_irq(&phba->hbalock);
5019                 return 1;
5020         }
5021
5022         spin_lock_irq(&phba->hbalock);
5023         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5024         spin_unlock_irq(&phba->hbalock);
5025
5026         mempool_free(pmb, phba->mbox_mem_pool);
5027
5028         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5029          * attention every 100ms for 3 seconds. If we don't get ERATT after
5030          * 3 seconds we still set HBA_ERROR state because the status of the
5031          * board is now undefined.
5032          */
5033         if (lpfc_readl(phba->HAregaddr, &ha_copy))
5034                 return 1;
5035         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5036                 mdelay(100);
5037                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5038                         return 1;
5039         }
5040
5041         del_timer_sync(&psli->mbox_tmo);
5042         if (ha_copy & HA_ERATT) {
5043                 writel(HA_ERATT, phba->HAregaddr);
5044                 phba->pport->stopped = 1;
5045         }
5046         spin_lock_irq(&phba->hbalock);
5047         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5048         psli->mbox_active = NULL;
5049         phba->link_flag &= ~LS_IGNORE_ERATT;
5050         spin_unlock_irq(&phba->hbalock);
5051
5052         lpfc_hba_down_post(phba);
5053         phba->link_state = LPFC_HBA_ERROR;
5054
5055         return ha_copy & HA_ERATT ? 0 : 1;
5056 }
5057
5058 /**
5059  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5060  * @phba: Pointer to HBA context object.
5061  *
5062  * This function resets the HBA by writing HC_INITFF to the control
5063  * register. After the HBA resets, this function resets all the iocb ring
5064  * indices. This function disables PCI layer parity checking during
5065  * the reset.
5066  * This function returns 0 on success, or -EIO if the PCI config read fails.
5067  * The caller is not required to hold any locks.
5068  **/
5069 int
5070 lpfc_sli_brdreset(struct lpfc_hba *phba)
5071 {
5072         struct lpfc_sli *psli;
5073         struct lpfc_sli_ring *pring;
5074         uint16_t cfg_value;
5075         int i;
5076
5077         psli = &phba->sli;
5078
5079         /* Reset HBA */
5080         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5081                         "0325 Reset HBA Data: x%x x%x\n",
5082                         (phba->pport) ? phba->pport->port_state : 0,
5083                         psli->sli_flag);
5084
5085         /* perform board reset */
5086         phba->fc_eventTag = 0;
5087         phba->link_events = 0;
5088         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5089         if (phba->pport) {
5090                 phba->pport->fc_myDID = 0;
5091                 phba->pport->fc_prevDID = 0;
5092         }
5093
5094         /* Turn off parity checking and serr during the physical reset */
5095         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5096                 return -EIO;
5097
5098         pci_write_config_word(phba->pcidev, PCI_COMMAND,
5099                               (cfg_value &
5100                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5101
5102         psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5103
5104         /* Now toggle INITFF bit in the Host Control Register */
5105         writel(HC_INITFF, phba->HCregaddr);
5106         mdelay(1);
5107         readl(phba->HCregaddr); /* flush */
5108         writel(0, phba->HCregaddr);
5109         readl(phba->HCregaddr); /* flush */
5110
5111         /* Restore PCI cmd register */
5112         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5113
5114         /* Initialize relevant SLI info */
5115         for (i = 0; i < psli->num_rings; i++) {
5116                 pring = &psli->sli3_ring[i];
5117                 pring->flag = 0;
5118                 pring->sli.sli3.rspidx = 0;
5119                 pring->sli.sli3.next_cmdidx  = 0;
5120                 pring->sli.sli3.local_getidx = 0;
5121                 pring->sli.sli3.cmdidx = 0;
5122                 pring->missbufcnt = 0;
5123         }
5124
5125         phba->link_state = LPFC_WARM_START;
5126         return 0;
5127 }
5128
5129 /**
5130  * lpfc_sli4_brdreset - Reset a sli-4 HBA
5131  * @phba: Pointer to HBA context object.
5132  *
5133  * This function resets a SLI4 HBA. It disables PCI layer parity
5134  * checking while it resets the device. The caller is not required
5135  * to hold any locks.
5136  *
5137  * This function returns 0 on success else returns negative error code.
5138  **/
5139 int
5140 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5141 {
5142         struct lpfc_sli *psli = &phba->sli;
5143         uint16_t cfg_value;
5144         int rc = 0;
5145
5146         /* Reset HBA */
5147         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5148                         "0295 Reset HBA Data: x%x x%x x%x\n",
5149                         phba->pport->port_state, psli->sli_flag,
5150                         phba->hba_flag);
5151
5152         /* perform board reset */
5153         phba->fc_eventTag = 0;
5154         phba->link_events = 0;
5155         phba->pport->fc_myDID = 0;
5156         phba->pport->fc_prevDID = 0;
5157         phba->hba_flag &= ~HBA_SETUP;
5158
5159         spin_lock_irq(&phba->hbalock);
5160         psli->sli_flag &= ~(LPFC_PROCESS_LA);
5161         phba->fcf.fcf_flag = 0;
5162         spin_unlock_irq(&phba->hbalock);
5163
5164         /* Now physically reset the device */
5165         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5166                         "0389 Performing PCI function reset!\n");
5167
5168         /* Turn off parity checking and serr during the physical reset */
5169         if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5170                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5171                                 "3205 PCI read Config failed\n");
5172                 return -EIO;
5173         }
5174
5175         pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5176                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5177
5178         /* Perform FCoE PCI function reset before freeing queue memory */
5179         rc = lpfc_pci_function_reset(phba);
5180
5181         /* Restore PCI cmd register */
5182         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5183
5184         return rc;
5185 }
5186
5187 /**
5188  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5189  * @phba: Pointer to HBA context object.
5190  *
5191  * This function is called in the SLI initialization code path to
5192  * restart the HBA. The caller is not required to hold any lock.
5193  * This function writes MBX_RESTART mailbox command to the SLIM and
5194  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5195  * function to free any pending commands. The function enables
5196  * POST only during the first initialization. The function returns zero
5197  * but does not wait for the MBX_RESTART mailbox command to complete
5198  * before returning.
5199  **/
5200 static int
5201 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5202 {
5203         volatile struct MAILBOX_word0 mb;
5204         struct lpfc_sli *psli;
5205         void __iomem *to_slim;
5206
5207         spin_lock_irq(&phba->hbalock);
5208
5209         psli = &phba->sli;
5210
5211         /* Restart HBA */
5212         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5213                         "0337 Restart HBA Data: x%x x%x\n",
5214                         (phba->pport) ? phba->pport->port_state : 0,
5215                         psli->sli_flag);
5216
5217         mb.word0 = 0;
5218         mb.mbxCommand = MBX_RESTART;
5219         mb.mbxHc = 1;
5220
5221         lpfc_reset_barrier(phba);
5222
5223         to_slim = phba->MBslimaddr;
5224         writel(mb.word0, to_slim);
5225         readl(to_slim); /* flush */
5226
5227         /* Only skip post after fc_ffinit is completed */
5228         if (phba->pport && phba->pport->port_state)
5229                 mb.word0 = 1;   /* This is really setting up word1 */
5230         else
5231                 mb.word0 = 0;   /* This is really setting up word1 */
5232         to_slim = phba->MBslimaddr + sizeof (uint32_t);
5233         writel(mb.word0, to_slim);
5234         readl(to_slim); /* flush */
5235
5236         lpfc_sli_brdreset(phba);
5237         if (phba->pport)
5238                 phba->pport->stopped = 0;
5239         phba->link_state = LPFC_INIT_START;
5240         phba->hba_flag = 0;
5241         spin_unlock_irq(&phba->hbalock);
5242
5243         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5244         psli->stats_start = ktime_get_seconds();
5245
5246         /* Give the INITFF and Post time to settle. */
5247         mdelay(100);
5248
5249         lpfc_hba_down_post(phba);
5250
5251         return 0;
5252 }
5253
5254 /**
5255  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5256  * @phba: Pointer to HBA context object.
5257  *
5258  * This function is called in the SLI initialization code path to restart
5259  * a SLI4 HBA. The caller is not required to hold any lock.
5260  * At the end of the function, it calls lpfc_hba_down_post function to
5261  * free any pending commands.
5262  **/
5263 static int
5264 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5265 {
5266         struct lpfc_sli *psli = &phba->sli;
5267         int rc;
5268
5269         /* Restart HBA */
5270         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5271                         "0296 Restart HBA Data: x%x x%x\n",
5272                         phba->pport->port_state, psli->sli_flag);
5273
5274         rc = lpfc_sli4_brdreset(phba);
5275         if (rc) {
5276                 phba->link_state = LPFC_HBA_ERROR;
5277                 goto hba_down_queue;
5278         }
5279
5280         spin_lock_irq(&phba->hbalock);
5281         phba->pport->stopped = 0;
5282         phba->link_state = LPFC_INIT_START;
5283         phba->hba_flag = 0;
5284         /* Preserve FA-PWWN expectation */
5285         phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5286         spin_unlock_irq(&phba->hbalock);
5287
5288         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5289         psli->stats_start = ktime_get_seconds();
5290
5291 hba_down_queue:
5292         lpfc_hba_down_post(phba);
5293         lpfc_sli4_queue_destroy(phba);
5294
5295         return rc;
5296 }
5297
5298 /**
5299  * lpfc_sli_brdrestart - Wrapper func for restarting hba
5300  * @phba: Pointer to HBA context object.
5301  *
5302  * This routine dispatches the actual SLI-3 or SLI-4 HBA restart routine
5303  * through the API jump table function pointer in the lpfc_hba struct.
5304 **/
5305 int
5306 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5307 {
5308         return phba->lpfc_sli_brdrestart(phba);
5309 }
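/*
 * Illustrative sketch (editorial addition, not driver code): the jump
 * table entry invoked above is installed once per adapter, based on the
 * PCI device group, during API jump-table setup. The exact setup routine
 * is an assumption; the assignment is along the lines of:
 *
 *	switch (dev_grp) {
 *	case LPFC_PCI_DEV_LP:
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *		break;
 *	}
 */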
5310
5311 /**
5312  * lpfc_sli_chipset_init - Wait for an HBA to become ready after a restart
5313  * @phba: Pointer to HBA context object.
5314  *
5315  * This function is called after an HBA restart to wait for the HBA to
5316  * become ready, as indicated by the HS_FFRDY and HS_MBRDY bits. If the
5317  * HBA is not ready after ~60 seconds of polling (150 iterations), the
5318  * function restarts the HBA once more and continues polling. The function
5319  * returns zero if the HBA restarted successfully, else a negative error code.
5320  **/
5321 int
5322 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5323 {
5324         uint32_t status, i = 0;
5325
5326         /* Read the HBA Host Status Register */
5327         if (lpfc_readl(phba->HSregaddr, &status))
5328                 return -EIO;
5329
5330         /* Check status register to see what current state is */
5331         i = 0;
5332         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5333
5334                 /* Check every 10ms for 10 retries, then every 100ms for 90
5335                  * retries, then every 1 sec for 50 retries, for a total of
5336                  * ~60 seconds, before resetting the board again and checking
5337                  * every 1 sec for another 50 retries. Up to 60 seconds must be
5338                  * allowed before the board is ready because the Falcon FIPS
5339                  * zeroization must complete first; any board reset in between
5340                  * restarts zeroization and further delays board readiness.
5341                  */
5342                 if (i++ >= 200) {
5343                         /* Adapter failed to init, timeout, status reg
5344                            <status> */
5345                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5346                                         "0436 Adapter failed to init, "
5347                                         "timeout, status reg x%x, "
5348                                         "FW Data: A8 x%x AC x%x\n", status,
5349                                         readl(phba->MBslimaddr + 0xa8),
5350                                         readl(phba->MBslimaddr + 0xac));
5351                         phba->link_state = LPFC_HBA_ERROR;
5352                         return -ETIMEDOUT;
5353                 }
5354
5355                 /* Check to see if any errors occurred during init */
5356                 if (status & HS_FFERM) {
5357                         /* ERROR: During chipset initialization */
5358                         /* Adapter failed to init, chipset, status reg
5359                            <status> */
5360                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5361                                         "0437 Adapter failed to init, "
5362                                         "chipset, status reg x%x, "
5363                                         "FW Data: A8 x%x AC x%x\n", status,
5364                                         readl(phba->MBslimaddr + 0xa8),
5365                                         readl(phba->MBslimaddr + 0xac));
5366                         phba->link_state = LPFC_HBA_ERROR;
5367                         return -EIO;
5368                 }
5369
5370                 if (i <= 10)
5371                         msleep(10);
5372                 else if (i <= 100)
5373                         msleep(100);
5374                 else
5375                         msleep(1000);
5376
5377                 if (i == 150) {
5378                         /* Do post */
5379                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5380                         lpfc_sli_brdrestart(phba);
5381                 }
5382                 /* Read the HBA Host Status Register */
5383                 if (lpfc_readl(phba->HSregaddr, &status))
5384                         return -EIO;
5385         }
5386
5387         /* Check to see if any errors occurred during init */
5388         if (status & HS_FFERM) {
5389                 /* ERROR: During chipset initialization */
5390                 /* Adapter failed to init, chipset, status reg <status> */
5391                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5392                                 "0438 Adapter failed to init, chipset, "
5393                                 "status reg x%x, "
5394                                 "FW Data: A8 x%x AC x%x\n", status,
5395                                 readl(phba->MBslimaddr + 0xa8),
5396                                 readl(phba->MBslimaddr + 0xac));
5397                 phba->link_state = LPFC_HBA_ERROR;
5398                 return -EIO;
5399         }
5400
5401         phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5402
5403         /* Clear all interrupt enable conditions */
5404         writel(0, phba->HCregaddr);
5405         readl(phba->HCregaddr); /* flush */
5406
5407         /* setup host attn register */
5408         writel(0xffffffff, phba->HAregaddr);
5409         readl(phba->HAregaddr); /* flush */
5410         return 0;
5411 }
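/*
 * Worked timing sketch (editorial addition) for the polling loop above,
 * assuming the msleep() intervals are honored exactly:
 *
 *	i =   1..10  : 10 polls x  10 ms =  0.1 s
 *	i =  11..100 : 90 polls x 100 ms =  9   s
 *	i = 101..150 : 50 polls x   1 s  = 50   s  (then restart the board)
 *	i = 151..200 : 50 polls x   1 s  = 50   s  (then return -ETIMEDOUT)
 *
 * i.e. ~59 seconds before the board is restarted once, and ~109 seconds
 * total before the function gives up.
 */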
5412
5413 /**
5414  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5415  *
5416  * This function calculates and returns the number of HBQs required to be
5417  * configured.
5418  **/
5419 int
5420 lpfc_sli_hbq_count(void)
5421 {
5422         return ARRAY_SIZE(lpfc_hbq_defs);
5423 }
5424
5425 /**
5426  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5427  *
5428  * This function adds the number of hbq entries in every HBQ to get
5429  * the total number of hbq entries required for the HBA and returns
5430  * the total count.
5431  **/
5432 static int
5433 lpfc_sli_hbq_entry_count(void)
5434 {
5435         int  hbq_count = lpfc_sli_hbq_count();
5436         int  count = 0;
5437         int  i;
5438
5439         for (i = 0; i < hbq_count; ++i)
5440                 count += lpfc_hbq_defs[i]->entry_count;
5441         return count;
5442 }
5443
5444 /**
5445  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5446  *
5447  * This function calculates amount of memory required for all hbq entries
5448  * to be configured and returns the total memory required.
5449  **/
5450 int
5451 lpfc_sli_hbq_size(void)
5452 {
5453         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5454 }
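/*
 * Illustrative caller sketch (editorial addition): the size computed by
 * lpfc_sli_hbq_size() feeds the one-time coherent DMA allocation that
 * backs all HBQ entries, roughly as done in the PCI memory setup path
 * (the exact call site is an assumption):
 *
 *	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
 *						 lpfc_sli_hbq_size(),
 *						 &phba->hbqslimp.phys,
 *						 GFP_KERNEL);
 */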
5455
5456 /**
5457  * lpfc_sli_hbq_setup - configure and initialize HBQs
5458  * @phba: Pointer to HBA context object.
5459  *
5460  * This function is called during the SLI initialization to configure
5461  * all the HBQs and post buffers to the HBQ. The caller is not
5462  * required to hold any locks. This function will return zero if successful
5463  * else it will return negative error code.
5464  **/
5465 static int
5466 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5467 {
5468         int  hbq_count = lpfc_sli_hbq_count();
5469         LPFC_MBOXQ_t *pmb;
5470         MAILBOX_t *pmbox;
5471         uint32_t hbqno;
5472         uint32_t hbq_entry_index;
5473
5474         /* Get a Mailbox buffer to setup mailbox
5475          * commands for HBA initialization
5476          */
5477         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5478
5479         if (!pmb)
5480                 return -ENOMEM;
5481
5482         pmbox = &pmb->u.mb;
5483
5484         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5485         phba->link_state = LPFC_INIT_MBX_CMDS;
5486         phba->hbq_in_use = 1;
5487
5488         hbq_entry_index = 0;
5489         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5490                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5491                 phba->hbqs[hbqno].hbqPutIdx      = 0;
5492                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5493                 phba->hbqs[hbqno].entry_count =
5494                         lpfc_hbq_defs[hbqno]->entry_count;
5495                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5496                         hbq_entry_index, pmb);
5497                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5498
5499                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5500                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5501                            mbxStatus <status>, ring <num> */
5502
5503                         lpfc_printf_log(phba, KERN_ERR,
5504                                         LOG_SLI | LOG_VPORT,
5505                                         "1805 Adapter failed to init. "
5506                                         "Data: x%x x%x x%x\n",
5507                                         pmbox->mbxCommand,
5508                                         pmbox->mbxStatus, hbqno);
5509
5510                         phba->link_state = LPFC_HBA_ERROR;
5511                         mempool_free(pmb, phba->mbox_mem_pool);
5512                         return -ENXIO;
5513                 }
5514         }
5515         phba->hbq_count = hbq_count;
5516
5517         mempool_free(pmb, phba->mbox_mem_pool);
5518
5519         /* Initially populate or replenish the HBQs */
5520         for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5521                 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5522         return 0;
5523 }
5524
5525 /**
5526  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5527  * @phba: Pointer to HBA context object.
5528  *
5529  * This function is called during SLI4 initialization to configure the
5530  * ELS receive buffer queue (the only HBQ used in SLI4 mode) and post
5531  * buffers to it. The caller is not required to hold any locks. This
5532  * function always returns zero.
5533  **/
5534 static int
5535 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5536 {
5537         phba->hbq_in_use = 1;
5538         /*
5539          * Special case when MDS diagnostics are enabled and supported:
5540          * the receive buffer count is halved to manage the incoming
5541          * traffic.
5542          */
5543         if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5544                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5545                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5546         else
5547                 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5548                         lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5549         phba->hbq_count = 1;
5550         /* Initially populate or replenish the HBQ */
5551         lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5552         return 0;
5553 }
5554
5555 /**
5556  * lpfc_sli_config_port - Issue config port mailbox command
5557  * @phba: Pointer to HBA context object.
5558  * @sli_mode: sli mode - 2/3
5559  *
5560  * This function is called by the SLI initialization code path to
5561  * issue the config_port mailbox command. It restarts the HBA
5562  * firmware and configures the SLI interface in the SLI mode
5563  * specified by the sli_mode argument. The caller is not required
5564  * to hold any locks.
5565  * The function returns 0 if successful, else returns negative error
5566  * code.
5567  **/
5568 int
5569 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5570 {
5571         LPFC_MBOXQ_t *pmb;
5572         uint32_t resetcount = 0, rc = 0, done = 0;
5573
5574         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5575         if (!pmb) {
5576                 phba->link_state = LPFC_HBA_ERROR;
5577                 return -ENOMEM;
5578         }
5579
5580         phba->sli_rev = sli_mode;
5581         while (resetcount < 2 && !done) {
5582                 spin_lock_irq(&phba->hbalock);
5583                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5584                 spin_unlock_irq(&phba->hbalock);
5585                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5586                 lpfc_sli_brdrestart(phba);
5587                 rc = lpfc_sli_chipset_init(phba);
5588                 if (rc)
5589                         break;
5590
5591                 spin_lock_irq(&phba->hbalock);
5592                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5593                 spin_unlock_irq(&phba->hbalock);
5594                 resetcount++;
5595
5596                 /* Call pre CONFIG_PORT mailbox command initialization.  A
5597                  * value of 0 means the call was successful.  Any other
5598                  * nonzero value is a failure, but if ERESTART is returned,
5599                  * the driver may reset the HBA and try again.
5600                  */
5601                 rc = lpfc_config_port_prep(phba);
5602                 if (rc == -ERESTART) {
5603                         phba->link_state = LPFC_LINK_UNKNOWN;
5604                         continue;
5605                 } else if (rc)
5606                         break;
5607
5608                 phba->link_state = LPFC_INIT_MBX_CMDS;
5609                 lpfc_config_port(phba, pmb);
5610                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5611                 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5612                                         LPFC_SLI3_HBQ_ENABLED |
5613                                         LPFC_SLI3_CRP_ENABLED |
5614                                         LPFC_SLI3_DSS_ENABLED);
5615                 if (rc != MBX_SUCCESS) {
5616                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5617                                 "0442 Adapter failed to init, mbxCmd x%x "
5618                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5619                                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5620                         spin_lock_irq(&phba->hbalock);
5621                         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5622                         spin_unlock_irq(&phba->hbalock);
5623                         rc = -ENXIO;
5624                 } else {
5625                         /* Allow asynchronous mailbox command to go through */
5626                         spin_lock_irq(&phba->hbalock);
5627                         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5628                         spin_unlock_irq(&phba->hbalock);
5629                         done = 1;
5630
5631                         if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5632                             (pmb->u.mb.un.varCfgPort.gasabt == 0))
5633                                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5634                                         "3110 Port did not grant ASABT\n");
5635                 }
5636         }
5637         if (!done) {
5638                 rc = -EINVAL;
5639                 goto do_prep_failed;
5640         }
5641         if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5642                 if (!pmb->u.mb.un.varCfgPort.cMA) {
5643                         rc = -ENXIO;
5644                         goto do_prep_failed;
5645                 }
5646                 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5647                         phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5648                         phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5649                         phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5650                                 phba->max_vpi : phba->max_vports;
5651
5652                 } else
5653                         phba->max_vpi = 0;
5654                 if (pmb->u.mb.un.varCfgPort.gerbm)
5655                         phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5656                 if (pmb->u.mb.un.varCfgPort.gcrp)
5657                         phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5658
5659                 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5660                 phba->port_gp = phba->mbox->us.s3_pgp.port;
5661
5662                 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5663                         if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5664                                 phba->cfg_enable_bg = 0;
5665                                 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5666                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5667                                                 "0443 Adapter did not grant "
5668                                                 "BlockGuard\n");
5669                         }
5670                 }
5671         } else {
5672                 phba->hbq_get = NULL;
5673                 phba->port_gp = phba->mbox->us.s2.port;
5674                 phba->max_vpi = 0;
5675         }
5676 do_prep_failed:
5677         mempool_free(pmb, phba->mbox_mem_pool);
5678         return rc;
5679 }
5680
5681
5682 /**
5683  * lpfc_sli_hba_setup - SLI initialization function
5684  * @phba: Pointer to HBA context object.
5685  *
5686  * This function is the main SLI initialization function. This function
5687  * is called by the HBA initialization code, HBA reset code and HBA
5688  * error attention handler code. Caller is not required to hold any
5689  * locks. This function issues config_port mailbox command to configure
5690  * the SLI, setup iocb rings and HBQ rings. In the end the function
5691  * calls the config_port_post function to issue init_link mailbox
5692  * command and to start the discovery. The function will return zero
5693  * if successful, else it will return negative error code.
5694  **/
5695 int
5696 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5697 {
5698         uint32_t rc;
5699         int  i;
5700         int longs;
5701
5702         /* Enable ISR already does config_port because of config_msi mbx */
5703         if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5704                 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5705                 if (rc)
5706                         return -EIO;
5707                 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5708         }
5709         phba->fcp_embed_io = 0; /* SLI4 FC support only */
5710
5711         if (phba->sli_rev == 3) {
5712                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5713                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5714         } else {
5715                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5716                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5717                 phba->sli3_options = 0;
5718         }
5719
5720         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5721                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5722                         phba->sli_rev, phba->max_vpi);
5723         rc = lpfc_sli_ring_map(phba);
5724
5725         if (rc)
5726                 goto lpfc_sli_hba_setup_error;
5727
5728         /* Initialize VPIs. */
5729         if (phba->sli_rev == LPFC_SLI_REV3) {
5730                 /*
5731                  * The VPI bitmask and physical ID array are allocated
5732                  * and initialized once only - at driver load.  A port
5733                  * reset doesn't need to reinitialize this memory.
5734                  */
5735                 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5736                         longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5737                         phba->vpi_bmask = kcalloc(longs,
5738                                                   sizeof(unsigned long),
5739                                                   GFP_KERNEL);
5740                         if (!phba->vpi_bmask) {
5741                                 rc = -ENOMEM;
5742                                 goto lpfc_sli_hba_setup_error;
5743                         }
5744
5745                         phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5746                                                 sizeof(uint16_t),
5747                                                 GFP_KERNEL);
5748                         if (!phba->vpi_ids) {
5749                                 kfree(phba->vpi_bmask);
5750                                 rc = -ENOMEM;
5751                                 goto lpfc_sli_hba_setup_error;
5752                         }
5753                         for (i = 0; i < phba->max_vpi; i++)
5754                                 phba->vpi_ids[i] = i;
5755                 }
5756         }
5757
5758         /* Init HBQs */
5759         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5760                 rc = lpfc_sli_hbq_setup(phba);
5761                 if (rc)
5762                         goto lpfc_sli_hba_setup_error;
5763         }
5764         spin_lock_irq(&phba->hbalock);
5765         phba->sli.sli_flag |= LPFC_PROCESS_LA;
5766         spin_unlock_irq(&phba->hbalock);
5767
5768         rc = lpfc_config_port_post(phba);
5769         if (rc)
5770                 goto lpfc_sli_hba_setup_error;
5771
5772         return rc;
5773
5774 lpfc_sli_hba_setup_error:
5775         phba->link_state = LPFC_HBA_ERROR;
5776         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5777                         "0445 Firmware initialization failed\n");
5778         return rc;
5779 }
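/*
 * Worked example (editorial addition) for the VPI bitmask sizing above:
 * VPIs 0..max_vpi inclusive need max_vpi + 1 bits, so with max_vpi = 255
 * and BITS_PER_LONG = 64:
 *
 *	longs = (255 + 64) / 64 = 4	(4 * 64 = 256 bits, exactly enough)
 */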
5780
5781 /**
5782  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5783  * @phba: Pointer to HBA context object.
5784  *
5785  * This function issues a dump mailbox command to read config region
5786  * 23, parses the records in the region, and populates the driver
5787  * data structures.
5788  **/
5789 static int
5790 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5791 {
5792         LPFC_MBOXQ_t *mboxq;
5793         struct lpfc_dmabuf *mp;
5794         struct lpfc_mqe *mqe;
5795         uint32_t data_length;
5796         int rc;
5797
5798         /* Program the default value of vlan_id and fc_map */
5799         phba->valid_vlan = 0;
5800         phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5801         phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5802         phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5803
5804         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5805         if (!mboxq)
5806                 return -ENOMEM;
5807
5808         mqe = &mboxq->u.mqe;
5809         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5810                 rc = -ENOMEM;
5811                 goto out_free_mboxq;
5812         }
5813
5814         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5815         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5816
5817         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5818                         "(%d):2571 Mailbox cmd x%x Status x%x "
5819                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5820                         "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5821                         "CQ: x%x x%x x%x x%x\n",
5822                         mboxq->vport ? mboxq->vport->vpi : 0,
5823                         bf_get(lpfc_mqe_command, mqe),
5824                         bf_get(lpfc_mqe_status, mqe),
5825                         mqe->un.mb_words[0], mqe->un.mb_words[1],
5826                         mqe->un.mb_words[2], mqe->un.mb_words[3],
5827                         mqe->un.mb_words[4], mqe->un.mb_words[5],
5828                         mqe->un.mb_words[6], mqe->un.mb_words[7],
5829                         mqe->un.mb_words[8], mqe->un.mb_words[9],
5830                         mqe->un.mb_words[10], mqe->un.mb_words[11],
5831                         mqe->un.mb_words[12], mqe->un.mb_words[13],
5832                         mqe->un.mb_words[14], mqe->un.mb_words[15],
5833                         mqe->un.mb_words[16], mqe->un.mb_words[50],
5834                         mboxq->mcqe.word0,
5835                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
5836                         mboxq->mcqe.trailer);
5837
5838         if (rc) {
5839                 rc = -EIO;
5840                 goto out_free_mboxq;
5841         }
5842         data_length = mqe->un.mb_words[5];
5843         if (data_length > DMP_RGN23_SIZE) {
5844                 rc = -EIO;
5845                 goto out_free_mboxq;
5846         }
5847
5848         lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5849         rc = 0;
5850
5851 out_free_mboxq:
5852         lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5853         return rc;
5854 }
5855
5856 /**
5857  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5858  * @phba: pointer to lpfc hba data structure.
5859  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5860  * @vpd: pointer to the memory to hold resulting port vpd data.
5861  * @vpd_size: On input, the number of bytes allocated to @vpd.
5862  *            On output, the number of data bytes in @vpd.
5863  *
5864  * This routine executes a READ_REV SLI4 mailbox command.  In
5865  * addition, this routine gets the port vpd data.
5866  *
5867  * Return codes
5868  *      0 - successful
5869  *      -ENOMEM - could not allocate memory.
5870  **/
5871 static int
5872 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5873                     uint8_t *vpd, uint32_t *vpd_size)
5874 {
5875         int rc = 0;
5876         uint32_t dma_size;
5877         struct lpfc_dmabuf *dmabuf;
5878         struct lpfc_mqe *mqe;
5879
5880         dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5881         if (!dmabuf)
5882                 return -ENOMEM;
5883
5884         /*
5885          * Get a DMA buffer for the vpd data resulting from the READ_REV
5886          * mailbox command.
5887          */
5888         dma_size = *vpd_size;
5889         dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5890                                           &dmabuf->phys, GFP_KERNEL);
5891         if (!dmabuf->virt) {
5892                 kfree(dmabuf);
5893                 return -ENOMEM;
5894         }
5895
5896         /*
5897          * The SLI4 implementation of READ_REV conflicts at word1,
5898          * bits 31:16 and SLI4 adds vpd functionality not present
5899          * in SLI3.  This code corrects the conflicts.
5900          */
5901         lpfc_read_rev(phba, mboxq);
5902         mqe = &mboxq->u.mqe;
5903         mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5904         mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5905         mqe->un.read_rev.word1 &= 0x0000FFFF;
5906         bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5907         bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5908
5909         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5910         if (rc) {
5911                 dma_free_coherent(&phba->pcidev->dev, dma_size,
5912                                   dmabuf->virt, dmabuf->phys);
5913                 kfree(dmabuf);
5914                 return -EIO;
5915         }
5916
5917         /*
5918          * The available vpd length cannot be bigger than the
5919          * DMA buffer passed to the port.  Catch the less than
5920          * case and update the caller's size.
5921          */
5922         if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5923                 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5924
5925         memcpy(vpd, dmabuf->virt, *vpd_size);
5926
5927         dma_free_coherent(&phba->pcidev->dev, dma_size,
5928                           dmabuf->virt, dmabuf->phys);
5929         kfree(dmabuf);
5930         return 0;
5931 }
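/*
 * Illustrative caller sketch (editorial addition) for the in/out
 * @vpd_size contract: the size passed in is the allocation size, and on
 * return it holds the number of valid VPD bytes. The buffer size and
 * parse step below are assumptions modeled on the SLI4 setup path:
 *
 *	uint32_t vpd_size = SLI4_PAGE_SIZE;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		lpfc_parse_vpd(phba, vpd, vpd_size);
 */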
5932
5933 /**
5934  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5935  * @phba: pointer to lpfc hba data structure.
5936  *
5937  * This routine retrieves the SLI4 device controller attributes (link
5938  * type/number, BIOS version, flash id, ASIC rev) for this PCI function.
5939  *
5940  * Return codes
5941  *      0 - successful
5942  *      otherwise - failed to retrieve controller attributes
5943  **/
5944 static int
5945 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5946 {
5947         LPFC_MBOXQ_t *mboxq;
5948         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5949         struct lpfc_controller_attribute *cntl_attr;
5950         void *virtaddr = NULL;
5951         uint32_t alloclen, reqlen;
5952         uint32_t shdr_status, shdr_add_status;
5953         union lpfc_sli4_cfg_shdr *shdr;
5954         int rc;
5955
5956         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5957         if (!mboxq)
5958                 return -ENOMEM;
5959
5960         /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5961         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5962         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5963                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5964                         LPFC_SLI4_MBX_NEMBED);
5965
5966         if (alloclen < reqlen) {
5967                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5968                                 "3084 Allocated DMA memory size (%d) is "
5969                                 "less than the requested DMA memory size "
5970                                 "(%d)\n", alloclen, reqlen);
5971                 rc = -ENOMEM;
5972                 goto out_free_mboxq;
5973         }
5974         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5975         virtaddr = mboxq->sge_array->addr[0];
5976         mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5977         shdr = &mbx_cntl_attr->cfg_shdr;
5978         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5979         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5980         if (shdr_status || shdr_add_status || rc) {
5981                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5982                                 "3085 Mailbox x%x (x%x/x%x) failed, "
5983                                 "rc:x%x, status:x%x, add_status:x%x\n",
5984                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5985                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5986                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5987                                 rc, shdr_status, shdr_add_status);
5988                 rc = -ENXIO;
5989                 goto out_free_mboxq;
5990         }
5991
5992         cntl_attr = &mbx_cntl_attr->cntl_attr;
5993         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5994         phba->sli4_hba.lnk_info.lnk_tp =
5995                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5996         phba->sli4_hba.lnk_info.lnk_no =
5997                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5998         phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5999         phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6000
6001         memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6002         strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6003                 sizeof(phba->BIOSVersion));
6004
6005         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6006                         "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6007                         "flash_id: x%02x, asic_rev: x%02x\n",
6008                         phba->sli4_hba.lnk_info.lnk_tp,
6009                         phba->sli4_hba.lnk_info.lnk_no,
6010                         phba->BIOSVersion, phba->sli4_hba.flash_id,
6011                         phba->sli4_hba.asic_rev);
6012 out_free_mboxq:
6013         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6014                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6015         else
6016                 mempool_free(mboxq, phba->mbox_mem_pool);
6017         return rc;
6018 }
6019
6020 /**
6021  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6022  * @phba: pointer to lpfc hba data structure.
6023  *
6024  * This routine retrieves the physical port name of the SLI4 device
6025  * port this PCI function is attached to.
6026  *
6027  * Return codes
6028  *      0 - successful
6029  *      otherwise - failed to retrieve physical port name
6030  **/
6031 static int
6032 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6033 {
6034         LPFC_MBOXQ_t *mboxq;
6035         struct lpfc_mbx_get_port_name *get_port_name;
6036         uint32_t shdr_status, shdr_add_status;
6037         union lpfc_sli4_cfg_shdr *shdr;
6038         char cport_name = 0;
6039         int rc;
6040
6041         /* We assume nothing at this point */
6042         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6043         phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6044
6045         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6046         if (!mboxq)
6047                 return -ENOMEM;
6048         /* obtain link type and link number via READ_CONFIG */
6049         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6050         lpfc_sli4_read_config(phba);
6051
6052         if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6053                 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6054
6055         if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6056                 goto retrieve_ppname;
6057
6058         /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6059         rc = lpfc_sli4_get_ctl_attr(phba);
6060         if (rc)
6061                 goto out_free_mboxq;
6062
6063 retrieve_ppname:
6064         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6065                 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6066                 sizeof(struct lpfc_mbx_get_port_name) -
6067                 sizeof(struct lpfc_sli4_cfg_mhdr),
6068                 LPFC_SLI4_MBX_EMBED);
6069         get_port_name = &mboxq->u.mqe.un.get_port_name;
6070         shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6071         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6072         bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6073                 phba->sli4_hba.lnk_info.lnk_tp);
6074         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6075         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6076         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6077         if (shdr_status || shdr_add_status || rc) {
6078                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6079                                 "3087 Mailbox x%x (x%x/x%x) failed: "
6080                                 "rc:x%x, status:x%x, add_status:x%x\n",
6081                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6082                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6083                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6084                                 rc, shdr_status, shdr_add_status);
6085                 rc = -ENXIO;
6086                 goto out_free_mboxq;
6087         }
6088         switch (phba->sli4_hba.lnk_info.lnk_no) {
6089         case LPFC_LINK_NUMBER_0:
6090                 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6091                                 &get_port_name->u.response);
6092                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6093                 break;
6094         case LPFC_LINK_NUMBER_1:
6095                 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6096                                 &get_port_name->u.response);
6097                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6098                 break;
6099         case LPFC_LINK_NUMBER_2:
6100                 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6101                                 &get_port_name->u.response);
6102                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6103                 break;
6104         case LPFC_LINK_NUMBER_3:
6105                 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6106                                 &get_port_name->u.response);
6107                 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6108                 break;
6109         default:
6110                 break;
6111         }
6112
6113         if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6114                 phba->Port[0] = cport_name;
6115                 phba->Port[1] = '\0';
6116                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6117                                 "3091 SLI get port name: %s\n", phba->Port);
6118         }
6119
6120 out_free_mboxq:
6121         if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6122                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6123         else
6124                 mempool_free(mboxq, phba->mbox_mem_pool);
6125         return rc;
6126 }
6127
6128 /**
6129  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6130  * @phba: pointer to lpfc hba data structure.
6131  *
6132  * This routine is called to explicitly arm the SLI4 device's completion and
6133  * event queues.
6134  **/
6135 static void
6136 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6137 {
6138         int qidx;
6139         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6140         struct lpfc_sli4_hdw_queue *qp;
6141         struct lpfc_queue *eq;
6142
6143         sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6144         sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6145         if (sli4_hba->nvmels_cq)
6146                 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6147                                            LPFC_QUEUE_REARM);
6148
6149         if (sli4_hba->hdwq) {
6150                 /* Loop thru all Hardware Queues */
6151                 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6152                         qp = &sli4_hba->hdwq[qidx];
6153                         /* ARM the corresponding CQ */
6154                         sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6155                                                 LPFC_QUEUE_REARM);
6156                 }
6157
6158                 /* Loop thru all IRQ vectors */
6159                 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6160                         eq = sli4_hba->hba_eq_hdl[qidx].eq;
6161                         /* ARM the corresponding EQ */
6162                         sli4_hba->sli4_write_eq_db(phba, eq,
6163                                                    0, LPFC_QUEUE_REARM);
6164                 }
6165         }
6166
6167         if (phba->nvmet_support) {
6168                 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6169                         sli4_hba->sli4_write_cq_db(phba,
6170                                 sli4_hba->nvmet_cqset[qidx], 0,
6171                                 LPFC_QUEUE_REARM);
6172                 }
6173         }
6174 }
6175
6176 /**
6177  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6178  * @phba: Pointer to HBA context object.
6179  * @type: The resource extent type.
6180  * @extnt_count: buffer to hold port available extent count.
6181  * @extnt_size: buffer to hold element count per extent.
6182  *
6183  * This function calls the port and retrieves the number of available
6184  * extents and their size for a particular extent type.
6185  *
6186  * Returns: 0 if successful.  Nonzero otherwise.
6187  **/
6188 int
6189 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6190                                uint16_t *extnt_count, uint16_t *extnt_size)
6191 {
6192         int rc = 0;
6193         uint32_t length;
6194         uint32_t mbox_tmo;
6195         struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6196         LPFC_MBOXQ_t *mbox;
6197
6198         *extnt_count = 0;
6199         *extnt_size = 0;
6200
6201         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6202         if (!mbox)
6203                 return -ENOMEM;
6204
6205         /* Find out how many extents are available for this resource type */
6206         length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6207                   sizeof(struct lpfc_sli4_cfg_mhdr));
6208         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6209                          LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6210                          length, LPFC_SLI4_MBX_EMBED);
6211
6212         /* Send an extents count of 0 - the GET doesn't use it. */
6213         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6214                                         LPFC_SLI4_MBX_EMBED);
6215         if (unlikely(rc)) {
6216                 rc = -EIO;
6217                 goto err_exit;
6218         }
6219
6220         if (!phba->sli4_hba.intr_enable)
6221                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6222         else {
6223                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6224                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6225         }
6226         if (unlikely(rc)) {
6227                 rc = -EIO;
6228                 goto err_exit;
6229         }
6230
6231         rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6232         if (bf_get(lpfc_mbox_hdr_status,
6233                    &rsrc_info->header.cfg_shdr.response)) {
6234                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6235                                 "2930 Failed to get resource extents "
6236                                 "Status 0x%x Add'l Status 0x%x\n",
6237                                 bf_get(lpfc_mbox_hdr_status,
6238                                        &rsrc_info->header.cfg_shdr.response),
6239                                 bf_get(lpfc_mbox_hdr_add_status,
6240                                        &rsrc_info->header.cfg_shdr.response));
6241                 rc = -EIO;
6242                 goto err_exit;
6243         }
6244
6245         *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6246                               &rsrc_info->u.rsp);
6247         *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6248                              &rsrc_info->u.rsp);
6249
6250         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6251                         "3162 Retrieved extents type-%d from port: count:%d, "
6252                         "size:%d\n", type, *extnt_count, *extnt_size);
6253
6254 err_exit:
6255         mempool_free(mbox, phba->mbox_mem_pool);
6256         return rc;
6257 }
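/*
 * Illustrative caller sketch (editorial addition) for the two
 * out-parameters; the local names are hypothetical:
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *	if (!rc)
 *		total_xri = ext_cnt * ext_size;
 */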
6258
6259 /**
6260  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6261  * @phba: Pointer to HBA context object.
6262  * @type: The extent type to check.
6263  *
6264  * This function reads the current available extents from the port and checks
6265  * if the extent count or extent size has changed since the last access.
6266  * Callers use this routine after a port reset to determine whether
6267  * there is an extent reprovisioning requirement.
6268  *
6269  * Returns:
6270  *   -Error: a negative error code indicates a problem.
6271  *   1: Extent count or size has changed.
6272  *   0: No changes.
6273  **/
6274 static int
6275 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6276 {
6277         uint16_t curr_ext_cnt, rsrc_ext_cnt;
6278         uint16_t size_diff, rsrc_ext_size;
6279         int rc = 0;
6280         struct lpfc_rsrc_blks *rsrc_entry;
6281         struct list_head *rsrc_blk_list = NULL;
6282
6283         size_diff = 0;
6284         curr_ext_cnt = 0;
6285         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6286                                             &rsrc_ext_cnt,
6287                                             &rsrc_ext_size);
6288         if (unlikely(rc))
6289                 return -EIO;
6290
6291         switch (type) {
6292         case LPFC_RSC_TYPE_FCOE_RPI:
6293                 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6294                 break;
6295         case LPFC_RSC_TYPE_FCOE_VPI:
6296                 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6297                 break;
6298         case LPFC_RSC_TYPE_FCOE_XRI:
6299                 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6300                 break;
6301         case LPFC_RSC_TYPE_FCOE_VFI:
6302                 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6303                 break;
6304         default:
6305                 break;
6306         }
6307
6308         list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6309                 curr_ext_cnt++;
6310                 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6311                         size_diff++;
6312         }
6313
6314         if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6315                 rc = 1;
6316
6317         return rc;
6318 }
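/*
 * Illustrative sketch (editorial addition) of consuming the tri-state
 * return value after a port reset; the reprovision step is hypothetical:
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *	if (rc < 0)
 *		return rc;			// mailbox or port failure
 *	if (rc == 1)
 *		reprovision_extents(phba);	// layout changed
 *	// rc == 0: existing provisioning is still valid
 */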
6319
6320 /**
6321  * lpfc_sli4_cfg_post_extnts - Post a request for resource extents
6322  * @phba: Pointer to HBA context object.
6323  * @extnt_cnt: number of available extents.
6324  * @type: the extent type (rpi, xri, vfi, vpi).
6325  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6326  * @mbox: pointer to the caller's allocated mailbox structure.
6327  *
6328  * This function executes the extents allocation request.  It also
6329  * takes care of the amount of memory needed to allocate or get the
6330  * allocated extents. It is the caller's responsibility to evaluate
6331  * the response.
6332  *
6333  * Returns:
6334  *   -Error:  Error value describes the condition found.
6335  *   0: if successful
6336  **/
6337 static int
6338 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6339                           uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6340 {
6341         int rc = 0;
6342         uint32_t req_len;
6343         uint32_t emb_len;
6344         uint32_t alloc_len, mbox_tmo;
6345
6346         /* Calculate the total requested length of the dma memory */
6347         req_len = extnt_cnt * sizeof(uint16_t);
6348
6349         /*
6350          * Calculate the size of an embedded mailbox.  The uint32_t
6351          * accounts for the extents-specific word.
6352          */
6353         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6354                 sizeof(uint32_t);
6355
6356         /*
6357          * Presume the allocation and response will fit into an embedded
6358          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6359          */
6360         *emb = LPFC_SLI4_MBX_EMBED;
6361         if (req_len > emb_len) {
6362                 req_len = extnt_cnt * sizeof(uint16_t) +
6363                         sizeof(union lpfc_sli4_cfg_shdr) +
6364                         sizeof(uint32_t);
6365                 *emb = LPFC_SLI4_MBX_NEMBED;
6366         }
6367
6368         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6369                                      LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6370                                      req_len, *emb);
6371         if (alloc_len < req_len) {
6372                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6373                         "2982 Allocated DMA memory size (x%x) is "
6374                         "less than the requested DMA memory "
6375                         "size (x%x)\n", alloc_len, req_len);
6376                 return -ENOMEM;
6377         }
6378         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6379         if (unlikely(rc))
6380                 return -EIO;
6381
6382         if (!phba->sli4_hba.intr_enable)
6383                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6384         else {
6385                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6386                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6387         }
6388
6389         if (unlikely(rc))
6390                 rc = -EIO;
6391         return rc;
6392 }
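/*
 * Worked example (editorial addition) for the embedded/non-embedded
 * decision above, using illustrative sizes: if emb_len works out to,
 * say, 200 bytes, a request for 64 extents needs
 * 64 * sizeof(uint16_t) = 128 bytes and stays embedded, while a request
 * for 128 extents needs 256 bytes and is reconfigured as a non-embedded
 * (SGE-based) mailbox with the cfg_shdr and extents word added to
 * req_len.
 */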
6393
6394 /**
6395  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6396  * @phba: Pointer to HBA context object.
6397  * @type:  The resource extent type to allocate.
6398  *
6399  * This function allocates the number of elements for the specified
6400  * resource type.
6401  **/
6402 static int
6403 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6404 {
6405         bool emb = false;
6406         uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6407         uint16_t rsrc_id, rsrc_start, j, k;
6408         uint16_t *ids;
6409         int i, rc;
6410         unsigned long longs;
6411         unsigned long *bmask;
6412         struct lpfc_rsrc_blks *rsrc_blks;
6413         LPFC_MBOXQ_t *mbox;
6414         uint32_t length;
6415         struct lpfc_id_range *id_array = NULL;
6416         void *virtaddr = NULL;
6417         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6418         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6419         struct list_head *ext_blk_list;
6420
6421         rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6422                                             &rsrc_cnt,
6423                                             &rsrc_size);
6424         if (unlikely(rc))
6425                 return -EIO;
6426
6427         if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6428                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6429                         "3009 No available Resource Extents "
6430                         "for resource type 0x%x: Count: 0x%x, "
6431                         "Size 0x%x\n", type, rsrc_cnt,
6432                         rsrc_size);
6433                 return -ENOMEM;
6434         }
6435
6436         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6437                         "2903 Post resource extents type-0x%x: "
6438                         "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6439
6440         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6441         if (!mbox)
6442                 return -ENOMEM;
6443
6444         rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6445         if (unlikely(rc)) {
6446                 rc = -EIO;
6447                 goto err_exit;
6448         }
6449
6450         /*
6451          * Figure out where the response is located.  Then get local pointers
6452          * to the response data.  The port does not guarantee a response
6453          * for the full extent count requested, so update the local variable
6454          * with the count actually allocated by the port.
6455          */
6456         if (emb == LPFC_SLI4_MBX_EMBED) {
6457                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6458                 id_array = &rsrc_ext->u.rsp.id[0];
6459                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6460         } else {
6461                 virtaddr = mbox->sge_array->addr[0];
6462                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6463                 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6464                 id_array = &n_rsrc->id;
6465         }
6466
6467         longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6468         rsrc_id_cnt = rsrc_cnt * rsrc_size;
6469
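        /*
         * Illustration (not driver code): "longs" above is the usual
         * round-up to whole unsigned longs for a bitmap of
         * rsrc_cnt * rsrc_size bits.  With hypothetical values:
         *
         *	bits  = 5 * 64;		// rsrc_cnt = 5, rsrc_size = 64
         *	longs = (bits + BITS_PER_LONG - 1) / BITS_PER_LONG;
         *	// with BITS_PER_LONG == 64: (320 + 63) / 64 = 5 longs
         */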
6470         /*
6471          * Based on the resource size and count, correct the base and max
6472          * resource values.
6473          */
6474         length = sizeof(struct lpfc_rsrc_blks);
6475         switch (type) {
6476         case LPFC_RSC_TYPE_FCOE_RPI:
6477                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6478                                                    sizeof(unsigned long),
6479                                                    GFP_KERNEL);
6480                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6481                         rc = -ENOMEM;
6482                         goto err_exit;
6483                 }
6484                 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6485                                                  sizeof(uint16_t),
6486                                                  GFP_KERNEL);
6487                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6488                         kfree(phba->sli4_hba.rpi_bmask);
6489                         rc = -ENOMEM;
6490                         goto err_exit;
6491                 }
6492
6493                 /*
6494                  * The next_rpi was initialized with the maximum available
6495                  * count but the port may allocate a smaller number.  Catch
6496                  * that case and update the next_rpi.
6497                  */
6498                 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6499
6500                 /* Initialize local ptrs for common extent processing later. */
6501                 bmask = phba->sli4_hba.rpi_bmask;
6502                 ids = phba->sli4_hba.rpi_ids;
6503                 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6504                 break;
6505         case LPFC_RSC_TYPE_FCOE_VPI:
6506                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6507                                           GFP_KERNEL);
6508                 if (unlikely(!phba->vpi_bmask)) {
6509                         rc = -ENOMEM;
6510                         goto err_exit;
6511                 }
6512                 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6513                                          GFP_KERNEL);
6514                 if (unlikely(!phba->vpi_ids)) {
6515                         kfree(phba->vpi_bmask);
6516                         rc = -ENOMEM;
6517                         goto err_exit;
6518                 }
6519
6520                 /* Initialize local ptrs for common extent processing later. */
6521                 bmask = phba->vpi_bmask;
6522                 ids = phba->vpi_ids;
6523                 ext_blk_list = &phba->lpfc_vpi_blk_list;
6524                 break;
6525         case LPFC_RSC_TYPE_FCOE_XRI:
6526                 phba->sli4_hba.xri_bmask = kcalloc(longs,
6527                                                    sizeof(unsigned long),
6528                                                    GFP_KERNEL);
6529                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6530                         rc = -ENOMEM;
6531                         goto err_exit;
6532                 }
6533                 phba->sli4_hba.max_cfg_param.xri_used = 0;
6534                 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6535                                                  sizeof(uint16_t),
6536                                                  GFP_KERNEL);
6537                 if (unlikely(!phba->sli4_hba.xri_ids)) {
6538                         kfree(phba->sli4_hba.xri_bmask);
6539                         rc = -ENOMEM;
6540                         goto err_exit;
6541                 }
6542
6543                 /* Initialize local ptrs for common extent processing later. */
6544                 bmask = phba->sli4_hba.xri_bmask;
6545                 ids = phba->sli4_hba.xri_ids;
6546                 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6547                 break;
6548         case LPFC_RSC_TYPE_FCOE_VFI:
6549                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6550                                                    sizeof(unsigned long),
6551                                                    GFP_KERNEL);
6552                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6553                         rc = -ENOMEM;
6554                         goto err_exit;
6555                 }
6556                 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6557                                                  sizeof(uint16_t),
6558                                                  GFP_KERNEL);
6559                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6560                         kfree(phba->sli4_hba.vfi_bmask);
6561                         rc = -ENOMEM;
6562                         goto err_exit;
6563                 }
6564
6565                 /* Initialize local ptrs for common extent processing later. */
6566                 bmask = phba->sli4_hba.vfi_bmask;
6567                 ids = phba->sli4_hba.vfi_ids;
6568                 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6569                 break;
6570         default:
6571                 /* Unsupported Opcode.  Fail call. */
6572                 id_array = NULL;
6573                 bmask = NULL;
6574                 ids = NULL;
6575                 ext_blk_list = NULL;
6576                 rc = -EIO;
                     goto err_exit;
6577         }
6578
6579         /*
6580          * Complete initializing the extent configuration with the
6581          * allocated ids assigned to this function.  The bitmask serves
6582          * as an index into the array and manages the available ids.  The
6583          * array just stores the ids communicated to the port via the wqes.
6584          */
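        /*
         * Illustration (hypothetical values, not driver code): judging by the
         * bf_get field names used below, each 32-bit word of id_array carries
         * two 16-bit extent base ids - word4_0 in the low half, word4_1 in
         * the high half - which is why the loop alternates on (i % 2) and
         * advances k only after both halves are consumed.  With rsrc_cnt = 3,
         * rsrc_size = 4 and base ids 100, 200, 300:
         *
         *	id_array[0] -> 100 (word4_0), 200 (word4_1)
         *	id_array[1] -> 300 (word4_0), high half unused
         *
         *	ids[] = { 100, 101, 102, 103,
         *		  200, 201, 202, 203,
         *		  300, 301, 302, 303 }
         */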
6585         for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6586                 if ((i % 2) == 0)
6587                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6588                                          &id_array[k]);
6589                 else
6590                         rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6591                                          &id_array[k]);
6592
6593                 rsrc_blks = kzalloc(length, GFP_KERNEL);
6594                 if (unlikely(!rsrc_blks)) {
6595                         rc = -ENOMEM;
6596                         kfree(bmask);
6597                         kfree(ids);
6598                         goto err_exit;
6599                 }
6600                 rsrc_blks->rsrc_start = rsrc_id;
6601                 rsrc_blks->rsrc_size = rsrc_size;
6602                 list_add_tail(&rsrc_blks->list, ext_blk_list);
6603                 rsrc_start = rsrc_id;
6604                 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6605                         phba->sli4_hba.io_xri_start = rsrc_start +
6606                                 lpfc_sli4_get_iocb_cnt(phba);
6607                 }
6608
6609                 while (rsrc_id < (rsrc_start + rsrc_size)) {
6610                         ids[j] = rsrc_id;
6611                         rsrc_id++;
6612                         j++;
6613                 }
6614                 /* Entire word processed.  Get next word.*/
6615                 if ((i % 2) == 1)
6616                         k++;
6617         }
6618  err_exit:
6619         lpfc_sli4_mbox_cmd_free(phba, mbox);
6620         return rc;
6621 }
6622
6623
6624
6625 /**
6626  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6627  * @phba: Pointer to HBA context object.
6628  * @type: the extent's type.
6629  *
6630  * This function deallocates all extents of a particular resource type.
6631  * SLI4 does not allow for deallocating a particular extent range.  It
6632  * is the caller's responsibility to release all kernel memory resources.
6633  **/
6634 static int
6635 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6636 {
6637         int rc;
6638         uint32_t length, mbox_tmo = 0;
6639         LPFC_MBOXQ_t *mbox;
6640         struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6641         struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6642
6643         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6644         if (!mbox)
6645                 return -ENOMEM;
6646
6647         /*
6648          * This function sends an embedded mailbox because it only sends the
6649          * resource type.  All extents of this type are released by the
6650          * port.
6651          */
6652         length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6653                   sizeof(struct lpfc_sli4_cfg_mhdr));
6654         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6655                          LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6656                          length, LPFC_SLI4_MBX_EMBED);
6657
6658         /* Send an extents count of 0 - the dealloc doesn't use it. */
6659         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6660                                         LPFC_SLI4_MBX_EMBED);
6661         if (unlikely(rc)) {
6662                 rc = -EIO;
6663                 goto out_free_mbox;
6664         }
6665         if (!phba->sli4_hba.intr_enable)
6666                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6667         else {
6668                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6669                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6670         }
6671         if (unlikely(rc)) {
6672                 rc = -EIO;
6673                 goto out_free_mbox;
6674         }
6675
6676         dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6677         if (bf_get(lpfc_mbox_hdr_status,
6678                    &dealloc_rsrc->header.cfg_shdr.response)) {
6679                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6680                                 "2919 Failed to release resource extents "
6681                                 "for type %d - Status 0x%x Add'l Status 0x%x. "
6682                                 "Resource memory not released.\n",
6683                                 type,
6684                                 bf_get(lpfc_mbox_hdr_status,
6685                                     &dealloc_rsrc->header.cfg_shdr.response),
6686                                 bf_get(lpfc_mbox_hdr_add_status,
6687                                     &dealloc_rsrc->header.cfg_shdr.response));
6688                 rc = -EIO;
6689                 goto out_free_mbox;
6690         }
6691
6692         /* Release kernel memory resources for the specific type. */
6693         switch (type) {
6694         case LPFC_RSC_TYPE_FCOE_VPI:
6695                 kfree(phba->vpi_bmask);
6696                 kfree(phba->vpi_ids);
6697                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6698                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6699                                     &phba->lpfc_vpi_blk_list, list) {
6700                         list_del_init(&rsrc_blk->list);
6701                         kfree(rsrc_blk);
6702                 }
6703                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6704                 break;
6705         case LPFC_RSC_TYPE_FCOE_XRI:
6706                 kfree(phba->sli4_hba.xri_bmask);
6707                 kfree(phba->sli4_hba.xri_ids);
6708                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6709                                     &phba->sli4_hba.lpfc_xri_blk_list, list) {
6710                         list_del_init(&rsrc_blk->list);
6711                         kfree(rsrc_blk);
6712                 }
6713                 break;
6714         case LPFC_RSC_TYPE_FCOE_VFI:
6715                 kfree(phba->sli4_hba.vfi_bmask);
6716                 kfree(phba->sli4_hba.vfi_ids);
6717                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6718                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6719                                     &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6720                         list_del_init(&rsrc_blk->list);
6721                         kfree(rsrc_blk);
6722                 }
6723                 break;
6724         case LPFC_RSC_TYPE_FCOE_RPI:
6725                 /* RPI bitmask and physical id array are cleaned up earlier. */
6726                 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6727                                     &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6728                         list_del_init(&rsrc_blk->list);
6729                         kfree(rsrc_blk);
6730                 }
6731                 break;
6732         default:
6733                 break;
6734         }
6735
6736         bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6737
6738  out_free_mbox:
6739         mempool_free(mbox, phba->mbox_mem_pool);
6740         return rc;
6741 }
6742
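/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the driver internal queue element for mailbox command.
 * @feature: The feature to configure (LPFC_SET_UE_RECOVERY,
 *           LPFC_SET_MDS_DIAGS, LPFC_SET_CGN_SIGNAL, LPFC_SET_DUAL_DUMP,
 *           LPFC_SET_ENABLE_MI, LPFC_SET_LD_SIGNAL or LPFC_SET_ENABLE_CMF).
 *
 * Initializes @mbox as an embedded SET_FEATURES config mailbox command and
 * fills in the feature-specific parameters.  The caller is responsible for
 * issuing the mailbox command to the port.
 **/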
6743 static void
6744 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6745                   uint32_t feature)
6746 {
6747         uint32_t len;
6748         u32 sig_freq = 0;
6749
6750         len = sizeof(struct lpfc_mbx_set_feature) -
6751                 sizeof(struct lpfc_sli4_cfg_mhdr);
6752         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6753                          LPFC_MBOX_OPCODE_SET_FEATURES, len,
6754                          LPFC_SLI4_MBX_EMBED);
6755
6756         switch (feature) {
6757         case LPFC_SET_UE_RECOVERY:
6758                 bf_set(lpfc_mbx_set_feature_UER,
6759                        &mbox->u.mqe.un.set_feature, 1);
6760                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6761                 mbox->u.mqe.un.set_feature.param_len = 8;
6762                 break;
6763         case LPFC_SET_MDS_DIAGS:
6764                 bf_set(lpfc_mbx_set_feature_mds,
6765                        &mbox->u.mqe.un.set_feature, 1);
6766                 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6767                        &mbox->u.mqe.un.set_feature, 1);
6768                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6769                 mbox->u.mqe.un.set_feature.param_len = 8;
6770                 break;
6771         case LPFC_SET_CGN_SIGNAL:
6772                 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6773                         sig_freq = 0;
6774                 else
6775                         sig_freq = phba->cgn_sig_freq;
6776
6777                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6778                         bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6779                                &mbox->u.mqe.un.set_feature, sig_freq);
6780                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6781                                &mbox->u.mqe.un.set_feature, sig_freq);
6782                 }
6783
6784                 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6785                         bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6786                                &mbox->u.mqe.un.set_feature, sig_freq);
6787
6788                 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6789                     phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6790                         sig_freq = 0;
6791                 else
6792                         sig_freq = lpfc_acqe_cgn_frequency;
6793
6794                 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6795                        &mbox->u.mqe.un.set_feature, sig_freq);
6796
6797                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6798                 mbox->u.mqe.un.set_feature.param_len = 12;
6799                 break;
6800         case LPFC_SET_DUAL_DUMP:
6801                 bf_set(lpfc_mbx_set_feature_dd,
6802                        &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6803                 bf_set(lpfc_mbx_set_feature_ddquery,
6804                        &mbox->u.mqe.un.set_feature, 0);
6805                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6806                 mbox->u.mqe.un.set_feature.param_len = 4;
6807                 break;
6808         case LPFC_SET_ENABLE_MI:
6809                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6810                 mbox->u.mqe.un.set_feature.param_len = 4;
6811                 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6812                        phba->pport->cfg_lun_queue_depth);
6813                 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6814                        phba->sli4_hba.pc_sli4_params.mi_ver);
6815                 break;
6816         case LPFC_SET_LD_SIGNAL:
6817                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6818                 mbox->u.mqe.un.set_feature.param_len = 16;
6819                 bf_set(lpfc_mbx_set_feature_lds_qry,
6820                        &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6821                 break;
6822         case LPFC_SET_ENABLE_CMF:
6823                 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6824                 mbox->u.mqe.un.set_feature.param_len = 4;
6825                 bf_set(lpfc_mbx_set_feature_cmf,
6826                        &mbox->u.mqe.un.set_feature, 1);
6827                 break;
6828         }
6829         return;
6830 }
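/*
 * Illustration (not driver code): a typical call sequence for the routine
 * above, assuming the caller already owns a mailbox from the mailbox
 * mempool, as the driver's setup paths do:
 *
 *	lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		goto out_free_mboxq;	// hypothetical error label
 */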
6831
6832 /**
6833  * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6834  * @phba: Pointer to HBA context object.
6835  *
6836  * Disable FW logging into host memory on the adapter. To
6837  * be done before reading logs from the host memory.
6838  **/
6839 void
6840 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6841 {
6842         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6843
6844         spin_lock_irq(&phba->hbalock);
6845         ras_fwlog->state = INACTIVE;
6846         spin_unlock_irq(&phba->hbalock);
6847
6848         /* Disable FW logging to host memory */
6849         writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6850                phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6851
6852         /* Wait 10ms for firmware to stop using DMA buffer */
6853         usleep_range(10 * 1000, 20 * 1000);
6854 }
6855
6856 /**
6857  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6858  * @phba: Pointer to HBA context object.
6859  *
6860  * This function is called to free memory allocated for RAS FW logging
6861  * support in the driver.
6862  **/
6863 void
6864 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6865 {
6866         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6867         struct lpfc_dmabuf *dmabuf, *next;
6868
6869         if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6870                 list_for_each_entry_safe(dmabuf, next,
6871                                     &ras_fwlog->fwlog_buff_list,
6872                                     list) {
6873                         list_del(&dmabuf->list);
6874                         dma_free_coherent(&phba->pcidev->dev,
6875                                           LPFC_RAS_MAX_ENTRY_SIZE,
6876                                           dmabuf->virt, dmabuf->phys);
6877                         kfree(dmabuf);
6878                 }
6879         }
6880
6881         if (ras_fwlog->lwpd.virt) {
6882                 dma_free_coherent(&phba->pcidev->dev,
6883                                   sizeof(uint32_t) * 2,
6884                                   ras_fwlog->lwpd.virt,
6885                                   ras_fwlog->lwpd.phys);
6886                 ras_fwlog->lwpd.virt = NULL;
6887         }
6888
6889         spin_lock_irq(&phba->hbalock);
6890         ras_fwlog->state = INACTIVE;
6891         spin_unlock_irq(&phba->hbalock);
6892 }
6893
6894 /**
6895  * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6896  * @phba: Pointer to HBA context object.
6897  * @fwlog_buff_count: Count of buffers to be created.
6898  *
6899  * This routine allocates DMA memory for the Log Write Position Data
6900  * [LWPD] and for the buffers that are posted to the adapter for FW log
6901  * updates.  The buffer count is calculated from the module param
6902  * ras_fwlog_buffsize; the size of each buffer posted to FW is 64K.
6903  **/
6904
6905 static int
6906 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6907                         uint32_t fwlog_buff_count)
6908 {
6909         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6910         struct lpfc_dmabuf *dmabuf;
6911         int rc = 0, i = 0;
6912
6913         /* Initialize List */
6914         INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6915
6916         /* Allocate memory for the LWPD */
6917         ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6918                                             sizeof(uint32_t) * 2,
6919                                             &ras_fwlog->lwpd.phys,
6920                                             GFP_KERNEL);
6921         if (!ras_fwlog->lwpd.virt) {
6922                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6923                                 "6185 LWPD Memory Alloc Failed\n");
6924
6925                 return -ENOMEM;
6926         }
6927
6928         ras_fwlog->fw_buffcount = fwlog_buff_count;
6929         for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6930                 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6931                                  GFP_KERNEL);
6932                 if (!dmabuf) {
6933                         rc = -ENOMEM;
6934                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6935                                         "6186 Memory Alloc failed FW logging");
6936                         goto free_mem;
6937                 }
6938
6939                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6940                                                   LPFC_RAS_MAX_ENTRY_SIZE,
6941                                                   &dmabuf->phys, GFP_KERNEL);
6942                 if (!dmabuf->virt) {
6943                         kfree(dmabuf);
6944                         rc = -ENOMEM;
6945                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6946                                         "6187 DMA Alloc Failed FW logging");
6947                         goto free_mem;
6948                 }
6949                 dmabuf->buffer_tag = i;
6950                 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6951         }
6952
6953 free_mem:
6954         if (rc)
6955                 lpfc_sli4_ras_dma_free(phba);
6956
6957         return rc;
6958 }
6959
6960 /**
6961  * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6962  * @phba: pointer to lpfc hba data structure.
6963  * @pmb: pointer to the driver internal queue element for mailbox command.
6964  *
6965  * Completion handler for driver's RAS MBX command to the device.
6966  **/
6967 static void
6968 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6969 {
6970         MAILBOX_t *mb;
6971         union lpfc_sli4_cfg_shdr *shdr;
6972         uint32_t shdr_status, shdr_add_status;
6973         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6974
6975         mb = &pmb->u.mb;
6976
6977         shdr = (union lpfc_sli4_cfg_shdr *)
6978                 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6979         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6980         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6981
6982         if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6983                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6984                                 "6188 FW LOG mailbox "
6985                                 "completed with status x%x add_status x%x,"
6986                                 " mbx status x%x\n",
6987                                 shdr_status, shdr_add_status, mb->mbxStatus);
6988
6989                 ras_fwlog->ras_hwsupport = false;
6990                 goto disable_ras;
6991         }
6992
6993         spin_lock_irq(&phba->hbalock);
6994         ras_fwlog->state = ACTIVE;
6995         spin_unlock_irq(&phba->hbalock);
6996         mempool_free(pmb, phba->mbox_mem_pool);
6997
6998         return;
6999
7000 disable_ras:
7001         /* Free RAS DMA memory */
7002         lpfc_sli4_ras_dma_free(phba);
7003         mempool_free(pmb, phba->mbox_mem_pool);
7004 }
7005
7006 /**
7007  * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7008  * @phba: pointer to lpfc hba data structure.
7009  * @fwlog_level: Logging verbosity level.
7010  * @fwlog_enable: Enable/Disable logging.
7011  *
7012  * Initialize memory and post mailbox command to enable FW logging in host
7013  * memory.
7014  **/
7015 int
7016 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7017                          uint32_t fwlog_level,
7018                          uint32_t fwlog_enable)
7019 {
7020         struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7021         struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7022         struct lpfc_dmabuf *dmabuf;
7023         LPFC_MBOXQ_t *mbox;
7024         uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7025         int rc = 0;
7026
7027         spin_lock_irq(&phba->hbalock);
7028         ras_fwlog->state = INACTIVE;
7029         spin_unlock_irq(&phba->hbalock);
7030
7031         fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7032                           phba->cfg_ras_fwlog_buffsize);
7033         fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
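        /*
         * Worked example (assuming LPFC_RAS_MIN_BUFF_POST_SIZE is 256KB and
         * LPFC_RAS_MAX_ENTRY_SIZE is 64KB per their lpfc_hw4.h definitions):
         * with cfg_ras_fwlog_buffsize = 2,
         *
         *	fwlog_buffsize    = 256KB * 2  = 512KB
         *	fwlog_entry_count = 512KB/64KB = 8 buffers posted to the port
         */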
7034
7035         /*
7036          * If re-enabling FW logging support, reuse the DMA buffers
7037          * allocated earlier while posting the MBX command.
7038          */
7039         if (!ras_fwlog->lwpd.virt) {
7040                 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7041                 if (rc) {
7042                         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7043                                         "6189 FW Log Memory Allocation Failed");
7044                         return rc;
7045                 }
7046         }
7047
7048         /* Setup Mailbox command */
7049         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7050         if (!mbox) {
7051                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7052                                 "6190 RAS MBX Alloc Failed");
7053                 rc = -ENOMEM;
7054                 goto mem_free;
7055         }
7056
7057         ras_fwlog->fw_loglevel = fwlog_level;
7058         len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7059                 sizeof(struct lpfc_sli4_cfg_mhdr));
7060
7061         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7062                          LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7063                          len, LPFC_SLI4_MBX_EMBED);
7064
7065         mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7066         bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7067                fwlog_enable);
7068         bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7069                ras_fwlog->fw_loglevel);
7070         bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7071                ras_fwlog->fw_buffcount);
7072         bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7073                LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7074
7075         /* Update DMA buffer address */
7076         list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7077                 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7078
7079                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7080                         putPaddrLow(dmabuf->phys);
7081
7082                 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7083                         putPaddrHigh(dmabuf->phys);
7084         }
7085
7086         /* Update LWPD address */
7087         mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7088         mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7089
7090         spin_lock_irq(&phba->hbalock);
7091         ras_fwlog->state = REG_INPROGRESS;
7092         spin_unlock_irq(&phba->hbalock);
7093         mbox->vport = phba->pport;
7094         mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7095
7096         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7097
7098         if (rc == MBX_NOT_FINISHED) {
7099                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7100                                 "6191 FW-Log Mailbox failed. "
7101                                 "status %d mbxStatus : x%x", rc,
7102                                 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7103                 mempool_free(mbox, phba->mbox_mem_pool);
7104                 rc = -EIO;
7105                 goto mem_free;
7106         } else
7107                 rc = 0;
7108 mem_free:
7109         if (rc)
7110                 lpfc_sli4_ras_dma_free(phba);
7111
7112         return rc;
7113 }
7114
7115 /**
7116  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7117  * @phba: Pointer to HBA context object.
7118  *
7119  * Check if RAS is supported on the adapter and initialize it.
7120  **/
7121 void
7122 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7123 {
7124         /* Check whether RAS FW logging needs to be enabled */
7125         if (lpfc_check_fwlog_support(phba))
7126                 return;
7127
7128         lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7129                                  LPFC_RAS_ENABLE_LOGGING);
7130 }
7131
7132 /**
7133  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7134  * @phba: Pointer to HBA context object.
7135  *
7136  * This function allocates all SLI4 resource identifiers.
7137  **/
7138 int
7139 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7140 {
7141         int i, rc, error = 0;
7142         uint16_t count, base;
7143         unsigned long longs;
7144
7145         if (!phba->sli4_hba.rpi_hdrs_in_use)
7146                 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7147         if (phba->sli4_hba.extents_in_use) {
7148                 /*
7149                  * The port supports resource extents. The XRI, VPI, VFI, RPI
7150                  * resource extent count must be read and allocated before
7151                  * provisioning the resource id arrays.
7152                  */
7153                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7154                     LPFC_IDX_RSRC_RDY) {
7155                         /*
7156                          * Extent-based resources are set - the driver could
7157                          * be in a port reset. Figure out if any corrective
7158                          * actions need to be taken.
7159                          */
7160                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7161                                                  LPFC_RSC_TYPE_FCOE_VFI);
7162                         if (rc != 0)
7163                                 error++;
7164                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7165                                                  LPFC_RSC_TYPE_FCOE_VPI);
7166                         if (rc != 0)
7167                                 error++;
7168                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7169                                                  LPFC_RSC_TYPE_FCOE_XRI);
7170                         if (rc != 0)
7171                                 error++;
7172                         rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7173                                                  LPFC_RSC_TYPE_FCOE_RPI);
7174                         if (rc != 0)
7175                                 error++;
7176
7177                         /*
7178                          * It's possible that the number of resources
7179                          * provided to this port instance changed between
7180                          * resets.  Detect this condition and reallocate
7181                          * resources.  Otherwise, there is no action.
7182                          */
7183                         if (error) {
7184                                 lpfc_printf_log(phba, KERN_INFO,
7185                                                 LOG_MBOX | LOG_INIT,
7186                                                 "2931 Detected extent resource "
7187                                                 "change.  Reallocating all "
7188                                                 "extents.\n");
7189                                 rc = lpfc_sli4_dealloc_extent(phba,
7190                                                  LPFC_RSC_TYPE_FCOE_VFI);
7191                                 rc = lpfc_sli4_dealloc_extent(phba,
7192                                                  LPFC_RSC_TYPE_FCOE_VPI);
7193                                 rc = lpfc_sli4_dealloc_extent(phba,
7194                                                  LPFC_RSC_TYPE_FCOE_XRI);
7195                                 rc = lpfc_sli4_dealloc_extent(phba,
7196                                                  LPFC_RSC_TYPE_FCOE_RPI);
7197                         } else
7198                                 return 0;
7199                 }
7200
7201                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7202                 if (unlikely(rc))
7203                         goto err_exit;
7204
7205                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7206                 if (unlikely(rc))
7207                         goto err_exit;
7208
7209                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7210                 if (unlikely(rc))
7211                         goto err_exit;
7212
7213                 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7214                 if (unlikely(rc))
7215                         goto err_exit;
7216                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7217                        LPFC_IDX_RSRC_RDY);
7218                 return rc;
7219         } else {
7220                 /*
7221                  * The port does not support resource extents.  The XRI, VPI,
7222                  * VFI, RPI resource ids were determined from READ_CONFIG.
7223                  * Just allocate the bitmasks and provision the resource id
7224                  * arrays.  If a port reset is active, the resources don't
7225                  * need any action - just exit.
7226                  */
7227                 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7228                     LPFC_IDX_RSRC_RDY) {
7229                         lpfc_sli4_dealloc_resource_identifiers(phba);
7230                         lpfc_sli4_remove_rpis(phba);
7231                 }
7232                 /* RPIs. */
7233                 count = phba->sli4_hba.max_cfg_param.max_rpi;
7234                 if (count <= 0) {
7235                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7236                                         "3279 Invalid provisioning of "
7237                                         "rpi:%d\n", count);
7238                         rc = -EINVAL;
7239                         goto err_exit;
7240                 }
7241                 base = phba->sli4_hba.max_cfg_param.rpi_base;
7242                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7243                 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7244                                                    sizeof(unsigned long),
7245                                                    GFP_KERNEL);
7246                 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7247                         rc = -ENOMEM;
7248                         goto err_exit;
7249                 }
7250                 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7251                                                  GFP_KERNEL);
7252                 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7253                         rc = -ENOMEM;
7254                         goto free_rpi_bmask;
7255                 }
7256
7257                 for (i = 0; i < count; i++)
7258                         phba->sli4_hba.rpi_ids[i] = base + i;
7259
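                /*
                 * Illustration (not driver code): a bitmask/id-array pair
                 * like the one built above acts as a simple bitmap
                 * allocator.  A sketch of a lookup, mirroring how the
                 * driver allocates rpis elsewhere:
                 *
                 *	idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask,
                 *				  count);
                 *	if (idx < count) {
                 *		set_bit(idx, phba->sli4_hba.rpi_bmask);
                 *		rpi = phba->sli4_hba.rpi_ids[idx];
                 *	}
                 */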
7260                 /* VPIs. */
7261                 count = phba->sli4_hba.max_cfg_param.max_vpi;
7262                 if (count <= 0) {
7263                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7264                                         "3280 Invalid provisioning of "
7265                                         "vpi:%d\n", count);
7266                         rc = -EINVAL;
7267                         goto free_rpi_ids;
7268                 }
7269                 base = phba->sli4_hba.max_cfg_param.vpi_base;
7270                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7271                 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7272                                           GFP_KERNEL);
7273                 if (unlikely(!phba->vpi_bmask)) {
7274                         rc = -ENOMEM;
7275                         goto free_rpi_ids;
7276                 }
7277                 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7278                                         GFP_KERNEL);
7279                 if (unlikely(!phba->vpi_ids)) {
7280                         rc = -ENOMEM;
7281                         goto free_vpi_bmask;
7282                 }
7283
7284                 for (i = 0; i < count; i++)
7285                         phba->vpi_ids[i] = base + i;
7286
7287                 /* XRIs. */
7288                 count = phba->sli4_hba.max_cfg_param.max_xri;
7289                 if (count <= 0) {
7290                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7291                                         "3281 Invalid provisioning of "
7292                                         "xri:%d\n", count);
7293                         rc = -EINVAL;
7294                         goto free_vpi_ids;
7295                 }
7296                 base = phba->sli4_hba.max_cfg_param.xri_base;
7297                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7298                 phba->sli4_hba.xri_bmask = kcalloc(longs,
7299                                                    sizeof(unsigned long),
7300                                                    GFP_KERNEL);
7301                 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7302                         rc = -ENOMEM;
7303                         goto free_vpi_ids;
7304                 }
7305                 phba->sli4_hba.max_cfg_param.xri_used = 0;
7306                 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7307                                                  GFP_KERNEL);
7308                 if (unlikely(!phba->sli4_hba.xri_ids)) {
7309                         rc = -ENOMEM;
7310                         goto free_xri_bmask;
7311                 }
7312
7313                 for (i = 0; i < count; i++)
7314                         phba->sli4_hba.xri_ids[i] = base + i;
7315
7316                 /* VFIs. */
7317                 count = phba->sli4_hba.max_cfg_param.max_vfi;
7318                 if (count <= 0) {
7319                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7320                                         "3282 Invalid provisioning of "
7321                                         "vfi:%d\n", count);
7322                         rc = -EINVAL;
7323                         goto free_xri_ids;
7324                 }
7325                 base = phba->sli4_hba.max_cfg_param.vfi_base;
7326                 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7327                 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7328                                                    sizeof(unsigned long),
7329                                                    GFP_KERNEL);
7330                 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7331                         rc = -ENOMEM;
7332                         goto free_xri_ids;
7333                 }
7334                 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7335                                                  GFP_KERNEL);
7336                 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7337                         rc = -ENOMEM;
7338                         goto free_vfi_bmask;
7339                 }
7340
7341                 for (i = 0; i < count; i++)
7342                         phba->sli4_hba.vfi_ids[i] = base + i;
7343
7344                 /*
7345                  * Mark all resources ready.  An HBA reset doesn't need
7346                  * to reset the initialization.
7347                  */
7348                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7349                        LPFC_IDX_RSRC_RDY);
7350                 return 0;
7351         }
7352
7353  free_vfi_bmask:
7354         kfree(phba->sli4_hba.vfi_bmask);
7355         phba->sli4_hba.vfi_bmask = NULL;
7356  free_xri_ids:
7357         kfree(phba->sli4_hba.xri_ids);
7358         phba->sli4_hba.xri_ids = NULL;
7359  free_xri_bmask:
7360         kfree(phba->sli4_hba.xri_bmask);
7361         phba->sli4_hba.xri_bmask = NULL;
7362  free_vpi_ids:
7363         kfree(phba->vpi_ids);
7364         phba->vpi_ids = NULL;
7365  free_vpi_bmask:
7366         kfree(phba->vpi_bmask);
7367         phba->vpi_bmask = NULL;
7368  free_rpi_ids:
7369         kfree(phba->sli4_hba.rpi_ids);
7370         phba->sli4_hba.rpi_ids = NULL;
7371  free_rpi_bmask:
7372         kfree(phba->sli4_hba.rpi_bmask);
7373         phba->sli4_hba.rpi_bmask = NULL;
7374  err_exit:
7375         return rc;
7376 }
7377
7378 /**
7379  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7380  * @phba: Pointer to HBA context object.
7381  *
7382  * This function releases all resource identifiers held by the port,
7383  * deallocating extents when in use or freeing the id arrays and bitmasks.
7384  **/
7385 int
7386 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7387 {
7388         if (phba->sli4_hba.extents_in_use) {
7389                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7390                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7391                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7392                 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7393         } else {
7394                 kfree(phba->vpi_bmask);
7395                 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7396                 kfree(phba->vpi_ids);
7397                 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7398                 kfree(phba->sli4_hba.xri_bmask);
7399                 kfree(phba->sli4_hba.xri_ids);
7400                 kfree(phba->sli4_hba.vfi_bmask);
7401                 kfree(phba->sli4_hba.vfi_ids);
7402                 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7403                 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7404         }
7405
7406         return 0;
7407 }
7408
7409 /**
7410  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7411  * @phba: Pointer to HBA context object.
7412  * @type: The resource extent type.
7413  * @extnt_cnt: buffer to hold port extent count response
7414  * @extnt_size: buffer to hold port extent size response.
7415  *
7416  * This function calls the port to read the host allocated extents
7417  * for a particular type.
7418  **/
7419 int
7420 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7421                                uint16_t *extnt_cnt, uint16_t *extnt_size)
7422 {
7423         bool emb;
7424         int rc = 0;
7425         uint16_t curr_blks = 0;
7426         uint32_t req_len, emb_len;
7427         uint32_t alloc_len, mbox_tmo;
7428         struct list_head *blk_list_head;
7429         struct lpfc_rsrc_blks *rsrc_blk;
7430         LPFC_MBOXQ_t *mbox;
7431         void *virtaddr = NULL;
7432         struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7433         struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7434         union  lpfc_sli4_cfg_shdr *shdr;
7435
7436         switch (type) {
7437         case LPFC_RSC_TYPE_FCOE_VPI:
7438                 blk_list_head = &phba->lpfc_vpi_blk_list;
7439                 break;
7440         case LPFC_RSC_TYPE_FCOE_XRI:
7441                 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7442                 break;
7443         case LPFC_RSC_TYPE_FCOE_VFI:
7444                 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7445                 break;
7446         case LPFC_RSC_TYPE_FCOE_RPI:
7447                 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7448                 break;
7449         default:
7450                 return -EIO;
7451         }
7452
7453         /* Count the number of extents currently allocated for this type. */
7454         list_for_each_entry(rsrc_blk, blk_list_head, list) {
7455                 if (curr_blks == 0) {
7456                         /*
7457                          * The GET_ALLOCATED mailbox does not return the size,
7458                          * just the count.  The size should be just the size
7459                          * stored in the current allocated block and all sizes
7460                          * for an extent type are the same so set the return
7461                          * value now.
7462                          */
7463                         *extnt_size = rsrc_blk->rsrc_size;
7464                 }
7465                 curr_blks++;
7466         }
7467
7468         /*
7469          * Calculate the size of an embedded mailbox.  The uint32_t
7470          * accounts for extents-specific word.
7471          * accounts for the extents-specific word.
7472         emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7473                 sizeof(uint32_t);
7474
7475         /*
7476          * Presume the allocation and response will fit into an embedded
7477          * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7478          */
7479         emb = LPFC_SLI4_MBX_EMBED;
7480         req_len = curr_blks * sizeof(uint16_t);
7481         if (req_len > emb_len) {
7482                 req_len = curr_blks * sizeof(uint16_t) +
7483                         sizeof(union lpfc_sli4_cfg_shdr) +
7484                         sizeof(uint32_t);
7485                 emb = LPFC_SLI4_MBX_NEMBED;
7486         }
7487
7488         mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7489         if (!mbox)
7490                 return -ENOMEM;
7491         memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7492
7493         alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7494                                      LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7495                                      req_len, emb);
7496         if (alloc_len < req_len) {
7497                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7498                         "2983 Allocated DMA memory size (x%x) is "
7499                         "less than the requested DMA memory "
7500                         "size (x%x)\n", alloc_len, req_len);
7501                 rc = -ENOMEM;
7502                 goto err_exit;
7503         }
7504         rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7505         if (unlikely(rc)) {
7506                 rc = -EIO;
7507                 goto err_exit;
7508         }
7509
7510         if (!phba->sli4_hba.intr_enable)
7511                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7512         else {
7513                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7514                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7515         }
7516
7517         if (unlikely(rc)) {
7518                 rc = -EIO;
7519                 goto err_exit;
7520         }
7521
7522         /*
7523          * Figure out where the response is located.  Then get local pointers
7524          * to the response data.  The port does not guarantee a response
7525          * for the full extent count requested, so update the local variable
7526          * with the count actually allocated by the port.
7527          */
7528         if (emb == LPFC_SLI4_MBX_EMBED) {
7529                 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7530                 shdr = &rsrc_ext->header.cfg_shdr;
7531                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7532         } else {
7533                 virtaddr = mbox->sge_array->addr[0];
7534                 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7535                 shdr = &n_rsrc->cfg_shdr;
7536                 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7537         }
7538
7539         if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7540                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7541                         "2984 Failed to read allocated resources "
7542                         "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7543                         type,
7544                         bf_get(lpfc_mbox_hdr_status, &shdr->response),
7545                         bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7546                 rc = -EIO;
7547                 goto err_exit;
7548         }
7549  err_exit:
7550         lpfc_sli4_mbox_cmd_free(phba, mbox);
7551         return rc;
7552 }
7553
7554 /**
7555  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7556  * @phba: pointer to lpfc hba data structure.
7557  * @sgl_list: linked link of sgl buffers to post
7558  * @cnt: number of linked list buffers
7559  *
7560  * This routine walks the list of buffers that have been allocated and
7561  * reposts them to the port by using SGL block post. This is needed after a
7562  * pci_function_reset/warm_start or start. It attempts to construct blocks
7563  * of buffer sgls which contain contiguous xris and uses the non-embedded
7564  * SGL block post mailbox commands to post them to the port. For a single
7565  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7566  * post mailbox command for posting.
7567  *
7568  * Returns: 0 = success, non-zero failure.
7569  **/
7570 static int
7571 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7572                           struct list_head *sgl_list, int cnt)
7573 {
7574         struct lpfc_sglq *sglq_entry = NULL;
7575         struct lpfc_sglq *sglq_entry_next = NULL;
7576         struct lpfc_sglq *sglq_entry_first = NULL;
7577         int status, total_cnt;
7578         int post_cnt = 0, num_posted = 0, block_cnt = 0;
7579         int last_xritag = NO_XRI;
7580         LIST_HEAD(prep_sgl_list);
7581         LIST_HEAD(blck_sgl_list);
7582         LIST_HEAD(allc_sgl_list);
7583         LIST_HEAD(post_sgl_list);
7584         LIST_HEAD(free_sgl_list);
7585
7586         spin_lock_irq(&phba->hbalock);
7587         spin_lock(&phba->sli4_hba.sgl_list_lock);
7588         list_splice_init(sgl_list, &allc_sgl_list);
7589         spin_unlock(&phba->sli4_hba.sgl_list_lock);
7590         spin_unlock_irq(&phba->hbalock);
7591
7592         total_cnt = cnt;
7593         list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7594                                  &allc_sgl_list, list) {
7595                 list_del_init(&sglq_entry->list);
7596                 block_cnt++;
7597                 if ((last_xritag != NO_XRI) &&
7598                     (sglq_entry->sli4_xritag != last_xritag + 1)) {
7599                         /* a hole in xri block, form a sgl posting block */
7600                         list_splice_init(&prep_sgl_list, &blck_sgl_list);
7601                         post_cnt = block_cnt - 1;
7602                         /* prepare list for next posting block */
7603                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7604                         block_cnt = 1;
7605                 } else {
7606                         /* prepare list for next posting block */
7607                         list_add_tail(&sglq_entry->list, &prep_sgl_list);
7608                         /* enough sgls for non-embed sgl mbox command */
7609                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7610                                 list_splice_init(&prep_sgl_list,
7611                                                  &blck_sgl_list);
7612                                 post_cnt = block_cnt;
7613                                 block_cnt = 0;
7614                         }
7615                 }
7616                 num_posted++;
7617
7618                 /* keep track of last sgl's xritag */
7619                 last_xritag = sglq_entry->sli4_xritag;
7620
7621                 /* end of repost sgl list condition for buffers */
7622                 if (num_posted == total_cnt) {
7623                         if (post_cnt == 0) {
7624                                 list_splice_init(&prep_sgl_list,
7625                                                  &blck_sgl_list);
7626                                 post_cnt = block_cnt;
7627                         } else if (block_cnt == 1) {
7628                                 status = lpfc_sli4_post_sgl(phba,
7629                                                 sglq_entry->phys, 0,
7630                                                 sglq_entry->sli4_xritag);
7631                                 if (!status) {
7632                                         /* successful, put sgl to posted list */
7633                                         list_add_tail(&sglq_entry->list,
7634                                                       &post_sgl_list);
7635                                 } else {
7636                                         /* Failure, put sgl to free list */
7637                                         lpfc_printf_log(phba, KERN_WARNING,
7638                                                 LOG_SLI,
7639                                                 "3159 Failed to post "
7640                                                 "sgl, xritag:x%x\n",
7641                                                 sglq_entry->sli4_xritag);
7642                                         list_add_tail(&sglq_entry->list,
7643                                                       &free_sgl_list);
7644                                         total_cnt--;
7645                                 }
7646                         }
7647                 }
7648
7649                 /* continue until a nembed page worth of sgls */
7650                 if (post_cnt == 0)
7651                         continue;
7652
7653                 /* post the buffer list sgls as a block */
7654                 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7655                                                  post_cnt);
7656
7657                 if (!status) {
7658                         /* success, put sgl list to posted sgl list */
7659                         list_splice_init(&blck_sgl_list, &post_sgl_list);
7660                 } else {
7661                         /* Failure, put sgl list to free sgl list */
7662                         sglq_entry_first = list_first_entry(&blck_sgl_list,
7663                                                             struct lpfc_sglq,
7664                                                             list);
7665                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7666                                         "3160 Failed to post sgl-list, "
7667                                         "xritag:x%x-x%x\n",
7668                                         sglq_entry_first->sli4_xritag,
7669                                         (sglq_entry_first->sli4_xritag +
7670                                          post_cnt - 1));
7671                         list_splice_init(&blck_sgl_list, &free_sgl_list);
7672                         total_cnt -= post_cnt;
7673                 }
7674
7675                 /* don't reset xritag due to hole in xri block */
7676                 if (block_cnt == 0)
7677                         last_xritag = NO_XRI;
7678
7679                 /* reset sgl post count for next round of posting */
7680                 post_cnt = 0;
7681         }
7682
7683         /* free the sgls failed to post */
7684         lpfc_free_sgl_list(phba, &free_sgl_list);
7685
7686         /* push sgls posted to the available list */
7687         if (!list_empty(&post_sgl_list)) {
7688                 spin_lock_irq(&phba->hbalock);
7689                 spin_lock(&phba->sli4_hba.sgl_list_lock);
7690                 list_splice_init(&post_sgl_list, sgl_list);
7691                 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7692                 spin_unlock_irq(&phba->hbalock);
7693         } else {
7694                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7695                                 "3161 Failure to post sgl to port.\n");
7696                 return -EIO;
7697         }
7698
7699         /* return the number of XRIs actually posted */
7700         return total_cnt;
7701 }
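
/*
 * Typical caller pattern (a sketch; it mirrors the ELS sgl repost in
 * lpfc_sli4_hba_setup() later in this file): a negative return means the
 * port accepted none of the sgls and is treated as fatal, while a
 * non-negative return is the count of XRIs actually posted.
 *
 *	rc = lpfc_sli4_repost_sgl_list(phba,
 *				       &phba->sli4_hba.lpfc_els_sgl_list,
 *				       phba->sli4_hba.els_xri_cnt);
 *	if (rc < 0)
 *		return -ENODEV;
 *	phba->sli4_hba.els_xri_cnt = rc;
 */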
7702
7703 /**
7704  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7705  * @phba: pointer to lpfc hba data structure.
7706  *
7707  * This routine walks the list of nvme buffers that have been allocated and
7708  * reposts them to the port by using SGL block post. This is needed after a
7709  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7710  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7711  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7712  *
7713  * Returns: 0 = success, non-zero failure.
7714  **/
7715 static int
7716 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7717 {
7718         LIST_HEAD(post_nblist);
7719         int num_posted, rc = 0;
7720
7721         /* get all NVME buffers that need reposting onto a local list */
7722         lpfc_io_buf_flush(phba, &post_nblist);
7723
7724         /* post the list of nvme buffer sgls to port if available */
7725         if (!list_empty(&post_nblist)) {
7726                 num_posted = lpfc_sli4_post_io_sgl_list(
7727                         phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7728                 /* failed to post any nvme buffer, return error */
7729                 if (num_posted == 0)
7730                         rc = -EIO;
7731         }
7732         return rc;
7733 }
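
/*
 * lpfc_sli4_repost_io_sgl_list() is invoked from lpfc_sli4_hba_setup()
 * later in this file, after the common xri-sgl sizes and mappings have
 * been updated; a posted count of zero is treated as fatal (-EIO) since
 * no I/O buffers would be usable.
 */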
7734
7735 static void
7736 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7737 {
7738         uint32_t len;
7739
7740         len = sizeof(struct lpfc_mbx_set_host_data) -
7741                 sizeof(struct lpfc_sli4_cfg_mhdr);
7742         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7743                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7744                          LPFC_SLI4_MBX_EMBED);
7745
7746         mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7747         mbox->u.mqe.un.set_host_data.param_len =
7748                                         LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7749         snprintf(mbox->u.mqe.un.set_host_data.un.data,
7750                  LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7751                  "Linux %s v"LPFC_DRIVER_VERSION,
7752                  (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7753 }
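
/*
 * lpfc_set_host_data() only builds the SET_HOST_DATA mailbox command; the
 * caller still allocates and issues it. A minimal polled sketch (the same
 * shape as the call in lpfc_sli4_hba_setup() below):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_set_host_data(phba, mboxq);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 */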
7754
7755 int
7756 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7757                     struct lpfc_queue *drq, int count, int idx)
7758 {
7759         int rc, i;
7760         struct lpfc_rqe hrqe;
7761         struct lpfc_rqe drqe;
7762         struct lpfc_rqb *rqbp;
7763         unsigned long flags;
7764         struct rqb_dmabuf *rqb_buffer;
7765         LIST_HEAD(rqb_buf_list);
7766
7767         rqbp = hrq->rqbp;
7768         for (i = 0; i < count; i++) {
7769                 spin_lock_irqsave(&phba->hbalock, flags);
7770                 /* If RQ is already full, don't bother */
7771                 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7772                         spin_unlock_irqrestore(&phba->hbalock, flags);
7773                         break;
7774                 }
7775                 spin_unlock_irqrestore(&phba->hbalock, flags);
7776
7777                 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7778                 if (!rqb_buffer)
7779                         break;
7780                 rqb_buffer->hrq = hrq;
7781                 rqb_buffer->drq = drq;
7782                 rqb_buffer->idx = idx;
7783                 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7784         }
7785
7786         spin_lock_irqsave(&phba->hbalock, flags);
7787         while (!list_empty(&rqb_buf_list)) {
7788                 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7789                                  hbuf.list);
7790
7791                 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7792                 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7793                 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7794                 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7795                 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7796                 if (rc < 0) {
7797                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7798                                         "6421 Cannot post to HRQ %d: %x %x %x "
7799                                         "DRQ %x %x\n",
7800                                         hrq->queue_id,
7801                                         hrq->host_index,
7802                                         hrq->hba_index,
7803                                         hrq->entry_count,
7804                                         drq->host_index,
7805                                         drq->hba_index);
7806                         rqbp->rqb_free_buffer(phba, rqb_buffer);
7807                 } else {
7808                         list_add_tail(&rqb_buffer->hbuf.list,
7809                                       &rqbp->rqb_buffer_list);
7810                         rqbp->buffer_count++;
7811                 }
7812         }
7813         spin_unlock_irqrestore(&phba->hbalock, flags);
7814         return 1;
7815 }
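
/*
 * Sketch of a typical call, as used for the NVMET MRQs in
 * lpfc_sli4_hba_setup() below: post an initial batch of buffers to a
 * header/data RQ pair indexed by idx. Note the return value is always 1;
 * callers rely on rqbp->buffer_count for the actual posted count.
 *
 *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 *			    phba->sli4_hba.nvmet_mrq_data[i],
 *			    phba->cfg_nvmet_mrq_post, i);
 */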
7816
7817 static void
7818 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7819 {
7820         union lpfc_sli4_cfg_shdr *shdr;
7821         u32 shdr_status, shdr_add_status;
7822
7823         shdr = (union lpfc_sli4_cfg_shdr *)
7824                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7825         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7826         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7827         if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7828                 lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7829                                 "4622 SET_FEATURE (x%x) mbox failed, "
7830                                 "status x%x add_status x%x, mbx status x%x\n",
7831                                 LPFC_SET_LD_SIGNAL, shdr_status,
7832                                 shdr_add_status, pmb->u.mb.mbxStatus);
7833                 phba->degrade_activate_threshold = 0;
7834                 phba->degrade_deactivate_threshold = 0;
7835                 phba->fec_degrade_interval = 0;
7836                 goto out;
7837         }
7838
7839         phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7840         phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7841         phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7842
7843         lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7844                         "4624 Success: da x%x dd x%x interval x%x\n",
7845                         phba->degrade_activate_threshold,
7846                         phba->degrade_deactivate_threshold,
7847                         phba->fec_degrade_interval);
7848 out:
7849         mempool_free(pmb, phba->mbox_mem_pool);
7850 }
7851
7852 int
7853 lpfc_read_lds_params(struct lpfc_hba *phba)
7854 {
7855         LPFC_MBOXQ_t *mboxq;
7856         int rc;
7857
7858         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7859         if (!mboxq)
7860                 return -ENOMEM;
7861
7862         lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7863         mboxq->vport = phba->pport;
7864         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7865         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7866         if (rc == MBX_NOT_FINISHED) {
7867                 mempool_free(mboxq, phba->mbox_mem_pool);
7868                 return -EIO;
7869         }
7870         return 0;
7871 }
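
/*
 * Note the asynchronous mailbox pattern above: with MBX_NOWAIT, ownership
 * of mboxq passes to the completion handler
 * (lpfc_mbx_cmpl_read_lds_params), which frees it back to the pool; the
 * issuer only frees it on MBX_NOT_FINISHED. Contrast with MBX_POLL, where
 * the issuer frees the mailbox itself after the synchronous return.
 */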
7872
7873 static void
7874 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7875 {
7876         struct lpfc_vport *vport = pmb->vport;
7877         union lpfc_sli4_cfg_shdr *shdr;
7878         u32 shdr_status, shdr_add_status;
7879         u32 sig, acqe;
7880
7881         /* Two outcomes. (1) Set features was successful and EDC negotiation
7882          * is done. (2) Mailbox failed, so send FPIN support only.
7883          */
7884         shdr = (union lpfc_sli4_cfg_shdr *)
7885                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7886         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7887         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7888         if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7889                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7890                                 "2516 CGN SET_FEATURE mbox failed with "
7891                                 "status x%x add_status x%x, mbx status x%x "
7892                                 "Reset Congestion to FPINs only\n",
7893                                 shdr_status, shdr_add_status,
7894                                 pmb->u.mb.mbxStatus);
7895                 /* If there is a mbox error, move on to RDF */
7896                 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7897                 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7898                 goto out;
7899         }
7900
7901         /* Zero out Congestion Signal ACQE counter */
7902         phba->cgn_acqe_cnt = 0;
7903
7904         acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7905                       &pmb->u.mqe.un.set_feature);
7906         sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7907                      &pmb->u.mqe.un.set_feature);
7908         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7909                         "4620 SET_FEATURES Success: Freq: %ds %dms "
7910                         " Reg: x%x x%x\n", acqe, sig,
7911                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7912 out:
7913         mempool_free(pmb, phba->mbox_mem_pool);
7914
7915         /* Register for FPIN events from the fabric now that the
7916          * EDC common_set_features has completed.
7917          */
7918         lpfc_issue_els_rdf(vport, 0);
7919 }
7920
7921 int
7922 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7923 {
7924         LPFC_MBOXQ_t *mboxq;
7925         u32 rc;
7926
7927         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7928         if (!mboxq)
7929                 goto out_rdf;
7930
7931         lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7932         mboxq->vport = phba->pport;
7933         mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7934
7935         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7936                         "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7937                         "Reg: x%x x%x\n",
7938                         phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7939                         phba->cgn_reg_signal, phba->cgn_reg_fpin);
7940
7941         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7942         if (rc == MBX_NOT_FINISHED)
7943                 goto out;
7944         return 0;
7945
7946 out:
7947         mempool_free(mboxq, phba->mbox_mem_pool);
7948 out_rdf:
7949         /* If there is a mbox error, move on to RDF */
7950         phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7951         phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7952         lpfc_issue_els_rdf(phba->pport, 0);
7953         return -EIO;
7954 }
7955
7956 /**
7957  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7958  * @phba: pointer to lpfc hba data structure.
7959  *
7960  * This routine initializes the per-cq idle_stat to dynamically dictate
7961  * polling decisions.
7962  *
7963  * Return codes:
7964  *   None
7965  **/
7966 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7967 {
7968         int i;
7969         struct lpfc_sli4_hdw_queue *hdwq;
7970         struct lpfc_queue *cq;
7971         struct lpfc_idle_stat *idle_stat;
7972         u64 wall;
7973
7974         for_each_present_cpu(i) {
7975                 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7976                 cq = hdwq->io_cq;
7977
7978                 /* Skip if we've already handled this cq's primary CPU */
7979                 if (cq->chann != i)
7980                         continue;
7981
7982                 idle_stat = &phba->sli4_hba.idle_stat[i];
7983
7984                 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7985                 idle_stat->prev_wall = wall;
7986
7987                 if (phba->nvmet_support ||
7988                     phba->cmf_active_mode != LPFC_CFG_OFF)
7989                         cq->poll_mode = LPFC_QUEUE_WORK;
7990                 else
7991                         cq->poll_mode = LPFC_IRQ_POLL;
7992         }
7993
7994         if (!phba->nvmet_support)
7995                 schedule_delayed_work(&phba->idle_stat_delay_work,
7996                                       msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7997 }
7998
7999 static void lpfc_sli4_dip(struct lpfc_hba *phba)
8000 {
8001         uint32_t if_type;
8002
8003         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8004         if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
8005             if_type == LPFC_SLI_INTF_IF_TYPE_6) {
8006                 struct lpfc_register reg_data;
8007
8008                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8009                                &reg_data.word0))
8010                         return;
8011
8012                 if (bf_get(lpfc_sliport_status_dip, &reg_data))
8013                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8014                                         "2904 Firmware Dump Image Present"
8015                                         " on Adapter");
8016         }
8017 }
8018
8019 /**
8020  * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8021  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8022  * @entries: Number of rx_info_entry objects to allocate in ring
8023  *
8024  * Return:
8025  * 0 - Success
8026  * -ENOMEM - Failure to kmalloc
8027  **/
8028 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8029                                 u32 entries)
8030 {
8031         rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
8032                                          GFP_KERNEL);
8033         if (!rx_monitor->ring)
8034                 return -ENOMEM;
8035
8036         rx_monitor->head_idx = 0;
8037         rx_monitor->tail_idx = 0;
8038         spin_lock_init(&rx_monitor->lock);
8039         rx_monitor->entries = entries;
8040
8041         return 0;
8042 }
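
/*
 * Create/destroy pairing (a sketch mirroring lpfc_cmf_setup() below): the
 * owner allocates the monitor object, instantiates its ring, and frees
 * both on failure or teardown.
 *
 *	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
 *	if (!monitor)
 *		return -ENOMEM;
 *	if (lpfc_rx_monitor_create_ring(monitor, LPFC_MAX_RXMONITOR_ENTRY)) {
 *		kfree(monitor);
 *		return -ENOMEM;
 *	}
 *	...
 *	lpfc_rx_monitor_destroy_ring(monitor);
 *	kfree(monitor);
 */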
8043
8044 /**
8045  * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8046  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8047  *
8048  * Called after cancellation of cmf_timer.
8049  **/
8050 void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8051 {
8052         kfree(rx_monitor->ring);
8053         rx_monitor->ring = NULL;
8054         rx_monitor->entries = 0;
8055         rx_monitor->head_idx = 0;
8056         rx_monitor->tail_idx = 0;
8057 }
8058
8059 /**
8060  * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8061  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8062  * @entry: Pointer to rx_info_entry
8063  *
8064  * Used to insert an rx_info_entry into rx_monitor's ring.  Note that this is a
8065  * deep copy of rx_info_entry, not a shallow copy of the rx_info_entry ptr.
8066  *
8067  * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8068  *
8069  * On overflow, the oldest entry is overwritten (best-effort FIFO order).
8070  **/
8071 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8072                             struct rx_info_entry *entry)
8073 {
8074         struct rx_info_entry *ring = rx_monitor->ring;
8075         u32 *head_idx = &rx_monitor->head_idx;
8076         u32 *tail_idx = &rx_monitor->tail_idx;
8077         spinlock_t *ring_lock = &rx_monitor->lock;
8078         u32 ring_size = rx_monitor->entries;
8079
8080         spin_lock(ring_lock);
8081         memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8082         *tail_idx = (*tail_idx + 1) % ring_size;
8083
8084         /* Best effort of FIFO saved data */
8085         if (*tail_idx == *head_idx)
8086                 *head_idx = (*head_idx + 1) % ring_size;
8087
8088         spin_unlock(ring_lock);
8089 }
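
/*
 * Worked example of the overwrite policy: with entries == 4 and head == 0,
 * the fourth record wraps tail back onto head, so head advances one slot
 * and the oldest entry is lost. Readers therefore see at most
 * entries - 1 items, in FIFO order.
 */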
8090
8091 /**
8092  * lpfc_rx_monitor_report - Read out rx_monitor's ring
8093  * @phba: Pointer to lpfc_hba object
8094  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8095  * @buf: Pointer to char buffer that will contain rx monitor info data
8096  * @buf_len: Length of buf including null char
8097  * @max_read_entries: Maximum number of entries to read out of ring
8098  *
8099  * Used to dump/read what's in rx_monitor's ring buffer.
8100  *
8101  * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8102  * information to kmsg instead of filling out buf.
8103  *
8104  * Return:
8105  * Number of entries read out of the ring
8106  **/
8107 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8108                            struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8109                            u32 buf_len, u32 max_read_entries)
8110 {
8111         struct rx_info_entry *ring = rx_monitor->ring;
8112         struct rx_info_entry *entry;
8113         u32 *head_idx = &rx_monitor->head_idx;
8114         u32 *tail_idx = &rx_monitor->tail_idx;
8115         spinlock_t *ring_lock = &rx_monitor->lock;
8116         u32 ring_size = rx_monitor->entries;
8117         u32 cnt = 0;
8118         char tmp[DBG_LOG_STR_SZ] = {0};
8119         bool log_to_kmsg = !buf || !buf_len;
8120
8121         if (!log_to_kmsg) {
8122                 /* clear the buffer to be sure */
8123                 memset(buf, 0, buf_len);
8124
8125                 scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8126                                         "%-8s%-8s%-8s%-16s\n",
8127                                         "MaxBPI", "Tot_Data_CMF",
8128                                         "Tot_Data_Cmd", "Tot_Data_Cmpl",
8129                                         "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8130                                         "IO_cnt", "Info", "BWutil(ms)");
8131         }
8132
8133         /* Needs to be _irq because record is called from timer interrupt
8134          * context
8135          */
8136         spin_lock_irq(ring_lock);
8137         while (*head_idx != *tail_idx) {
8138                 entry = &ring[*head_idx];
8139
8140                 /* Read out this entry's data. */
8141                 if (!log_to_kmsg) {
8142                         /* If !log_to_kmsg, then store to buf. */
8143                         scnprintf(tmp, sizeof(tmp),
8144                                   "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8145                                   "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8146                                   *head_idx, entry->max_bytes_per_interval,
8147                                   entry->cmf_bytes, entry->total_bytes,
8148                                   entry->rcv_bytes, entry->avg_io_latency,
8149                                   entry->avg_io_size, entry->max_read_cnt,
8150                                   entry->cmf_busy, entry->io_cnt,
8151                                   entry->cmf_info, entry->timer_utilization,
8152                                   entry->timer_interval);
8153
8154                         /* Check for buffer overflow */
8155                         if ((strlen(buf) + strlen(tmp)) >= buf_len)
8156                                 break;
8157
8158                         /* Append entry's data to buffer */
8159                         strlcat(buf, tmp, buf_len);
8160                 } else {
8161                         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8162                                         "4410 %02u: MBPI %llu Xmit %llu "
8163                                         "Cmpl %llu Lat %llu ASz %llu Info %02u "
8164                                         "BWUtil %u Int %u slot %u\n",
8165                                         cnt, entry->max_bytes_per_interval,
8166                                         entry->total_bytes, entry->rcv_bytes,
8167                                         entry->avg_io_latency,
8168                                         entry->avg_io_size, entry->cmf_info,
8169                                         entry->timer_utilization,
8170                                         entry->timer_interval, *head_idx);
8171                 }
8172
8173                 *head_idx = (*head_idx + 1) % ring_size;
8174
8175                 /* Don't feed more than max_read_entries */
8176                 cnt++;
8177                 if (cnt >= max_read_entries)
8178                         break;
8179         }
8180         spin_unlock_irq(ring_lock);
8181
8182         return cnt;
8183 }
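
/*
 * Two usage modes (sketch): pass a buffer to have the entries formatted
 * into it (e.g. for a debugfs-style read), or pass NULL/0 to dump the
 * same data to the kernel log.
 *
 *	cnt = lpfc_rx_monitor_report(phba, phba->rx_monitor,
 *				     buf, buf_len, max_entries);
 *	lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0, max_entries);
 */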
8184
8185 /**
8186  * lpfc_cmf_setup - Initialize CMF and MI support
8187  * @phba: Pointer to HBA context object.
8188  *
8189  * This is called from HBA setup during driver load or when the HBA
8190  * comes online. This does all the initialization to support CMF and MI.
8191  **/
8192 static int
8193 lpfc_cmf_setup(struct lpfc_hba *phba)
8194 {
8195         LPFC_MBOXQ_t *mboxq;
8196         struct lpfc_dmabuf *mp;
8197         struct lpfc_pc_sli4_params *sli4_params;
8198         int rc, cmf, mi_ver;
8199
8200         rc = lpfc_sli4_refresh_params(phba);
8201         if (unlikely(rc))
8202                 return rc;
8203
8204         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8205         if (!mboxq)
8206                 return -ENOMEM;
8207
8208         sli4_params = &phba->sli4_hba.pc_sli4_params;
8209
8210         /* Always try to enable MI feature if we can */
8211         if (sli4_params->mi_ver) {
8212                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8213                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8214                 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8215                                  &mboxq->u.mqe.un.set_feature);
8216
8217                 if (rc == MBX_SUCCESS) {
8218                         if (mi_ver) {
8219                                 lpfc_printf_log(phba,
8220                                                 KERN_WARNING, LOG_CGN_MGMT,
8221                                                 "6215 MI is enabled\n");
8222                                 sli4_params->mi_ver = mi_ver;
8223                         } else {
8224                                 lpfc_printf_log(phba,
8225                                                 KERN_WARNING, LOG_CGN_MGMT,
8226                                                 "6338 MI is disabled\n");
8227                                 sli4_params->mi_ver = 0;
8228                         }
8229                 } else {
8230                         /* mi_ver is already set from GET_SLI4_PARAMETERS */
8231                         lpfc_printf_log(phba, KERN_INFO,
8232                                         LOG_CGN_MGMT | LOG_INIT,
8233                                         "6245 Enable MI Mailbox x%x (x%x/x%x) "
8234                                         "failed, rc:x%x mi:x%x\n",
8235                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8236                                         lpfc_sli_config_mbox_subsys_get
8237                                                 (phba, mboxq),
8238                                         lpfc_sli_config_mbox_opcode_get
8239                                                 (phba, mboxq),
8240                                         rc, sli4_params->mi_ver);
8241                 }
8242         } else {
8243                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8244                                 "6217 MI is disabled\n");
8245         }
8246
8247         /* Ensure FDMI is enabled for MI if enable_mi is set */
8248         if (sli4_params->mi_ver)
8249                 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8250
8251         /* Always try to enable CMF feature if we can */
8252         if (sli4_params->cmf) {
8253                 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8254                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8255                 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8256                              &mboxq->u.mqe.un.set_feature);
8257                 if (rc == MBX_SUCCESS && cmf) {
8258                         lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8259                                         "6218 CMF is enabled: mode %d\n",
8260                                         phba->cmf_active_mode);
8261                 } else {
8262                         lpfc_printf_log(phba, KERN_WARNING,
8263                                         LOG_CGN_MGMT | LOG_INIT,
8264                                         "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8265                                         "failed, rc:x%x dd:x%x\n",
8266                                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8267                                         lpfc_sli_config_mbox_subsys_get
8268                                                 (phba, mboxq),
8269                                         lpfc_sli_config_mbox_opcode_get
8270                                                 (phba, mboxq),
8271                                         rc, cmf);
8272                         sli4_params->cmf = 0;
8273                         phba->cmf_active_mode = LPFC_CFG_OFF;
8274                         goto no_cmf;
8275                 }
8276
8277                 /* Allocate Congestion Information Buffer */
8278                 if (!phba->cgn_i) {
8279                         mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8280                         if (mp)
8281                                 mp->virt = dma_alloc_coherent
8282                                                 (&phba->pcidev->dev,
8283                                                 sizeof(struct lpfc_cgn_info),
8284                                                 &mp->phys, GFP_KERNEL);
8285                         if (!mp || !mp->virt) {
8286                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8287                                                 "2640 Failed to alloc memory "
8288                                                 "for Congestion Info\n");
8289                                 kfree(mp);
8290                                 sli4_params->cmf = 0;
8291                                 phba->cmf_active_mode = LPFC_CFG_OFF;
8292                                 goto no_cmf;
8293                         }
8294                         phba->cgn_i = mp;
8295
8296                         /* initialize congestion buffer info */
8297                         lpfc_init_congestion_buf(phba);
8298                         lpfc_init_congestion_stat(phba);
8299
8300                         /* Zero out Congestion Signal counters */
8301                         atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8302                         atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8303                 }
8304
8305                 rc = lpfc_sli4_cgn_params_read(phba);
8306                 if (rc < 0) {
8307                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8308                                         "6242 Error reading Cgn Params (%d)\n",
8309                                         rc);
8310                         /* Ensure CGN Mode is off */
8311                         sli4_params->cmf = 0;
8312                 } else if (!rc) {
8313                         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8314                                         "6243 CGN Event empty object.\n");
8315                         /* Ensure CGN Mode is off */
8316                         sli4_params->cmf = 0;
8317                 }
8318         } else {
8319 no_cmf:
8320                 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8321                                 "6220 CMF is disabled\n");
8322         }
8323
8324         /* Only register congestion buffer with firmware if BOTH
8325          * CMF and E2E are enabled.
8326          */
8327         if (sli4_params->cmf && sli4_params->mi_ver) {
8328                 rc = lpfc_reg_congestion_buf(phba);
8329                 if (rc) {
8330                         dma_free_coherent(&phba->pcidev->dev,
8331                                           sizeof(struct lpfc_cgn_info),
8332                                           phba->cgn_i->virt, phba->cgn_i->phys);
8333                         kfree(phba->cgn_i);
8334                         phba->cgn_i = NULL;
8335                         /* Ensure CGN Mode is off */
8336                         phba->cmf_active_mode = LPFC_CFG_OFF;
8337                         sli4_params->cmf = 0;
8338                         return 0;
8339                 }
8340         }
8341         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8342                         "6470 Setup MI version %d CMF %d mode %d\n",
8343                         sli4_params->mi_ver, sli4_params->cmf,
8344                         phba->cmf_active_mode);
8345
8346         mempool_free(mboxq, phba->mbox_mem_pool);
8347
8348         /* Initialize atomic counters */
8349         atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8350         atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8351         atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8352         atomic_set(&phba->cgn_sync_warn_cnt, 0);
8353         atomic_set(&phba->cgn_driver_evt_cnt, 0);
8354         atomic_set(&phba->cgn_latency_evt_cnt, 0);
8355         atomic64_set(&phba->cgn_latency_evt, 0);
8356
8357         phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8358
8359         /* Allocate RX Monitor Buffer */
8360         if (!phba->rx_monitor) {
8361                 phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
8362                                            GFP_KERNEL);
8363
8364                 if (!phba->rx_monitor) {
8365                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8366                                         "2644 Failed to alloc memory "
8367                                         "for RX Monitor Buffer\n");
8368                         return -ENOMEM;
8369                 }
8370
8371                 /* Instruct the rx_monitor object to instantiate its ring */
8372                 if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8373                                                 LPFC_MAX_RXMONITOR_ENTRY)) {
8374                         kfree(phba->rx_monitor);
8375                         phba->rx_monitor = NULL;
8376                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8377                                         "2645 Failed to alloc memory "
8378                                         "for RX Monitor's Ring\n");
8379                         return -ENOMEM;
8380                 }
8381         }
8382
8383         return 0;
8384 }
8385
8386 static int
8387 lpfc_set_host_tm(struct lpfc_hba *phba)
8388 {
8389         LPFC_MBOXQ_t *mboxq;
8390         uint32_t len, rc;
8391         struct timespec64 cur_time;
8392         struct tm broken;
8393         uint32_t month, day, year;
8394         uint32_t hour, minute, second;
8395         struct lpfc_mbx_set_host_date_time *tm;
8396
8397         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8398         if (!mboxq)
8399                 return -ENOMEM;
8400
8401         len = sizeof(struct lpfc_mbx_set_host_data) -
8402                 sizeof(struct lpfc_sli4_cfg_mhdr);
8403         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8404                          LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8405                          LPFC_SLI4_MBX_EMBED);
8406
8407         mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8408         mboxq->u.mqe.un.set_host_data.param_len =
8409                         sizeof(struct lpfc_mbx_set_host_date_time);
8410         tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8411         ktime_get_real_ts64(&cur_time);
8412         time64_to_tm(cur_time.tv_sec, 0, &broken);
8413         month = broken.tm_mon + 1;
8414         day = broken.tm_mday;
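        /* tm_year counts years since 1900 while the mailbox takes a
         * two-digit year, e.g. 2023 -> tm_year 123 -> year 23.
         */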
8415         year = broken.tm_year - 100;
8416         hour = broken.tm_hour;
8417         minute = broken.tm_min;
8418         second = broken.tm_sec;
8419         bf_set(lpfc_mbx_set_host_month, tm, month);
8420         bf_set(lpfc_mbx_set_host_day, tm, day);
8421         bf_set(lpfc_mbx_set_host_year, tm, year);
8422         bf_set(lpfc_mbx_set_host_hour, tm, hour);
8423         bf_set(lpfc_mbx_set_host_min, tm, minute);
8424         bf_set(lpfc_mbx_set_host_sec, tm, second);
8425
8426         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8427         mempool_free(mboxq, phba->mbox_mem_pool);
8428         return rc;
8429 }
8430
8431 /**
8432  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8433  * @phba: Pointer to HBA context object.
8434  *
8435  * This function is the main SLI4 device initialization PCI function. This
8436  * function is called by the HBA initialization code, HBA reset code and
8437  * HBA error attention handler code. Caller is not required to hold any
8438  * locks.
8439  **/
8440 int
8441 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8442 {
8443         int rc, i, cnt, len, dd;
8444         LPFC_MBOXQ_t *mboxq;
8445         struct lpfc_mqe *mqe;
8446         uint8_t *vpd;
8447         uint32_t vpd_size;
8448         uint32_t ftr_rsp = 0;
8449         struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8450         struct lpfc_vport *vport = phba->pport;
8451         struct lpfc_dmabuf *mp;
8452         struct lpfc_rqb *rqbp;
8453         u32 flg;
8454
8455         /* Perform a PCI function reset to start from clean */
8456         rc = lpfc_pci_function_reset(phba);
8457         if (unlikely(rc))
8458                 return -ENODEV;
8459
8460         /* Check the HBA Host Status Register for readiness */
8461         rc = lpfc_sli4_post_status_check(phba);
8462         if (unlikely(rc))
8463                 return -ENODEV;
8464         else {
8465                 spin_lock_irq(&phba->hbalock);
8466                 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8467                 flg = phba->sli.sli_flag;
8468                 spin_unlock_irq(&phba->hbalock);
8469                 /* Allow a little time after setting SLI_ACTIVE for any polled
8470                  * MBX commands to complete via BSG.
8471                  */
8472                 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8473                         msleep(20);
8474                         spin_lock_irq(&phba->hbalock);
8475                         flg = phba->sli.sli_flag;
8476                         spin_unlock_irq(&phba->hbalock);
8477                 }
8478         }
8479
8480         lpfc_sli4_dip(phba);
8481
8482         /*
8483          * Allocate a single mailbox container for initializing the
8484          * port.
8485          */
8486         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8487         if (!mboxq)
8488                 return -ENOMEM;
8489
8490         /* Issue READ_REV to collect vpd and FW information. */
8491         vpd_size = SLI4_PAGE_SIZE;
8492         vpd = kzalloc(vpd_size, GFP_KERNEL);
8493         if (!vpd) {
8494                 rc = -ENOMEM;
8495                 goto out_free_mbox;
8496         }
8497
8498         rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8499         if (unlikely(rc)) {
8500                 kfree(vpd);
8501                 goto out_free_mbox;
8502         }
8503
8504         mqe = &mboxq->u.mqe;
8505         phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8506         if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8507                 phba->hba_flag |= HBA_FCOE_MODE;
8508                 phba->fcp_embed_io = 0; /* SLI4 FC support only */
8509         } else {
8510                 phba->hba_flag &= ~HBA_FCOE_MODE;
8511         }
8512
8513         if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8514                 LPFC_DCBX_CEE_MODE)
8515                 phba->hba_flag |= HBA_FIP_SUPPORT;
8516         else
8517                 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8518
8519         phba->hba_flag &= ~HBA_IOQ_FLUSH;
8520
8521         if (phba->sli_rev != LPFC_SLI_REV4) {
8522                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8523                         "0376 READ_REV Error. SLI Level %d "
8524                         "FCoE enabled %d\n",
8525                         phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8526                 rc = -EIO;
8527                 kfree(vpd);
8528                 goto out_free_mbox;
8529         }
8530
8531         rc = lpfc_set_host_tm(phba);
8532         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8533                         "6468 Set host date / time: Status x%x\n", rc);
8534
8535         /*
8536          * Continue initialization with default values even if driver failed
8537          * to read FCoE param config regions, only read parameters if the
8538          * board is FCoE
8539          */
8540         if (phba->hba_flag & HBA_FCOE_MODE &&
8541             lpfc_sli4_read_fcoe_params(phba))
8542                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8543                         "2570 Failed to read FCoE parameters\n");
8544
8545         /*
8546          * Retrieve sli4 device physical port name; failure to do so
8547          * is considered non-fatal.
8548          */
8549         rc = lpfc_sli4_retrieve_pport_name(phba);
8550         if (!rc)
8551                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8552                                 "3080 Successfully retrieved SLI4 device "
8553                                 "physical port name: %s.\n", phba->Port);
8554
8555         rc = lpfc_sli4_get_ctl_attr(phba);
8556         if (!rc)
8557                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8558                                 "8351 Successfully retrieved SLI4 device "
8559                                 "CTL ATTR\n");
8560
8561         /*
8562          * Evaluate the read rev and vpd data. Populate the driver
8563          * state with the results. If this routine fails, the failure
8564          * is not fatal as the driver will use generic values.
8565          */
8566         rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8567         if (unlikely(!rc)) {
8568                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8569                                 "0377 Error %d parsing vpd. "
8570                                 "Using defaults.\n", rc);
8571                 rc = 0;
8572         }
8573         kfree(vpd);
8574
8575         /* Save information as VPD data */
8576         phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8577         phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8578
8579         /*
8580          * This is because the first G7 ASIC doesn't support the standard
8581          * 0x5a NVME cmd descriptor type/subtype
8582          */
8583         if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8584                         LPFC_SLI_INTF_IF_TYPE_6) &&
8585             (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8586             (phba->vpd.rev.smRev == 0) &&
8587             (phba->cfg_nvme_embed_cmd == 1))
8588                 phba->cfg_nvme_embed_cmd = 0;
8589
8590         phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8591         phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8592                                          &mqe->un.read_rev);
8593         phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8594                                        &mqe->un.read_rev);
8595         phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8596                                             &mqe->un.read_rev);
8597         phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8598                                            &mqe->un.read_rev);
8599         phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8600         memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8601         phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8602         memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8603         phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8604         memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8605         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8606                         "(%d):0380 READ_REV Status x%x "
8607                         "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8608                         mboxq->vport ? mboxq->vport->vpi : 0,
8609                         bf_get(lpfc_mqe_status, mqe),
8610                         phba->vpd.rev.opFwName,
8611                         phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8612                         phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8613
8614         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8615             LPFC_SLI_INTF_IF_TYPE_0) {
8616                 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8617                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8618                 if (rc == MBX_SUCCESS) {
8619                         phba->hba_flag |= HBA_RECOVERABLE_UE;
8620                         /* Set 1Sec interval to detect UE */
8621                         phba->eratt_poll_interval = 1;
8622                         phba->sli4_hba.ue_to_sr = bf_get(
8623                                         lpfc_mbx_set_feature_UESR,
8624                                         &mboxq->u.mqe.un.set_feature);
8625                         phba->sli4_hba.ue_to_rp = bf_get(
8626                                         lpfc_mbx_set_feature_UERP,
8627                                         &mboxq->u.mqe.un.set_feature);
8628                 }
8629         }
8630
8631         if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8632                 /* Enable MDS Diagnostics only if the SLI Port supports it */
8633                 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8634                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8635                 if (rc != MBX_SUCCESS)
8636                         phba->mds_diags_support = 0;
8637         }
8638
8639         /*
8640          * Discover the port's supported feature set and match it against the
8641          * host's requests.
8642          */
8643         lpfc_request_features(phba, mboxq);
8644         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8645         if (unlikely(rc)) {
8646                 rc = -EIO;
8647                 goto out_free_mbox;
8648         }
8649
8650         /* Disable VMID if app header is not supported */
8651         if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8652                                                   &mqe->un.req_ftrs))) {
8653                 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8654                 phba->cfg_vmid_app_header = 0;
8655                 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8656                                 "1242 vmid feature not supported\n");
8657         }
8658
8659         /*
8660          * The port must support FCP initiator mode as this is the
8661          * only mode running in the host.
8662          */
8663         if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8664                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8665                                 "0378 No support for fcpi mode.\n");
8666                 ftr_rsp++;
8667         }
8668
8669         /* Performance Hints are ONLY for FCoE */
8670         if (phba->hba_flag & HBA_FCOE_MODE) {
8671                 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8672                         phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8673                 else
8674                         phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8675         }
8676
8677         /*
8678          * If the port cannot support the host's requested features
8679          * then turn off the global config parameters to disable the
8680          * feature in the driver.  This is not a fatal error.
8681          */
8682         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8683                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8684                         phba->cfg_enable_bg = 0;
8685                         phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8686                         ftr_rsp++;
8687                 }
8688         }
8689
8690         if (phba->max_vpi && phba->cfg_enable_npiv &&
8691             !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8692                 ftr_rsp++;
8693
8694         if (ftr_rsp) {
8695                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8696                                 "0379 Feature Mismatch Data: x%08x %08x "
8697                                 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8698                                 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8699                                 phba->cfg_enable_npiv, phba->max_vpi);
8700                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8701                         phba->cfg_enable_bg = 0;
8702                 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8703                         phba->cfg_enable_npiv = 0;
8704         }
8705
8706         /* These SLI3 features are assumed in SLI4 */
8707         spin_lock_irq(&phba->hbalock);
8708         phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8709         spin_unlock_irq(&phba->hbalock);
8710
8711         /* Always try to enable dual dump feature if we can */
8712         lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8713         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8714         dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8715         if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8716                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8717                                 "6448 Dual Dump is enabled\n");
8718         else
8719                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8720                                 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8721                                 "rc:x%x dd:x%x\n",
8722                                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8723                                 lpfc_sli_config_mbox_subsys_get(
8724                                         phba, mboxq),
8725                                 lpfc_sli_config_mbox_opcode_get(
8726                                         phba, mboxq),
8727                                 rc, dd);
8728         /*
8729          * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
8730          * calls depend on these resources to complete port setup.
8731          */
8732         rc = lpfc_sli4_alloc_resource_identifiers(phba);
8733         if (rc) {
8734                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8735                                 "2920 Failed to alloc Resource IDs "
8736                                 "rc = x%x\n", rc);
8737                 goto out_free_mbox;
8738         }
8739
8740         lpfc_set_host_data(phba, mboxq);
8741
8742         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8743         if (rc) {
8744                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8745                                 "2134 Failed to set host os driver version %x\n",
8746                                 rc);
8747         }
8748
8749         /* Read the port's service parameters. */
8750         rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8751         if (rc) {
8752                 phba->link_state = LPFC_HBA_ERROR;
8753                 rc = -ENOMEM;
8754                 goto out_free_mbox;
8755         }
8756
8757         mboxq->vport = vport;
8758         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8759         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8760         if (rc == MBX_SUCCESS) {
8761                 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8762                 rc = 0;
8763         }
8764
8765         /*
8766          * This memory was allocated by the lpfc_read_sparam routine but is
8767          * no longer needed.  It is released and ctx_buf NULLed to prevent
8768          * unintended pointer access as the mbox is reused.
8769          */
8770         lpfc_mbuf_free(phba, mp->virt, mp->phys);
8771         kfree(mp);
8772         mboxq->ctx_buf = NULL;
8773         if (unlikely(rc)) {
8774                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8775                                 "0382 READ_SPARAM command failed "
8776                                 "status %d, mbxStatus x%x\n",
8777                                 rc, bf_get(lpfc_mqe_status, mqe));
8778                 phba->link_state = LPFC_HBA_ERROR;
8779                 rc = -EIO;
8780                 goto out_free_mbox;
8781         }
8782
8783         lpfc_update_vport_wwn(vport);
8784
8785         /* Update the fc_host data structures with new wwn. */
8786         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8787         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8788
8789         /* Create all the SLI4 queues */
8790         rc = lpfc_sli4_queue_create(phba);
8791         if (rc) {
8792                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8793                                 "3089 Failed to allocate queues\n");
8794                 rc = -ENODEV;
8795                 goto out_free_mbox;
8796         }
8797         /* Set up all the queues to the device */
8798         rc = lpfc_sli4_queue_setup(phba);
8799         if (unlikely(rc)) {
8800                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8801                                 "0381 Error %d during queue setup.\n", rc);
8802                 goto out_stop_timers;
8803         }
8804         /* Initialize the driver internal SLI layer lists. */
8805         lpfc_sli4_setup(phba);
8806         lpfc_sli4_queue_init(phba);
8807
8808         /* update host els xri-sgl sizes and mappings */
8809         rc = lpfc_sli4_els_sgl_update(phba);
8810         if (unlikely(rc)) {
8811                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8812                                 "1400 Failed to update xri-sgl size and "
8813                                 "mapping: %d\n", rc);
8814                 goto out_destroy_queue;
8815         }
8816
8817         /* register the els sgl pool to the port */
8818         rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8819                                        phba->sli4_hba.els_xri_cnt);
8820         if (unlikely(rc < 0)) {
8821                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8822                                 "0582 Error %d during els sgl post "
8823                                 "operation\n", rc);
8824                 rc = -ENODEV;
8825                 goto out_destroy_queue;
8826         }
8827         phba->sli4_hba.els_xri_cnt = rc;
8828
8829         if (phba->nvmet_support) {
8830                 /* update host nvmet xri-sgl sizes and mappings */
8831                 rc = lpfc_sli4_nvmet_sgl_update(phba);
8832                 if (unlikely(rc)) {
8833                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8834                                         "6308 Failed to update nvmet-sgl size "
8835                                         "and mapping: %d\n", rc);
8836                         goto out_destroy_queue;
8837                 }
8838
8839                 /* register the nvmet sgl pool to the port */
8840                 rc = lpfc_sli4_repost_sgl_list(
8841                         phba,
8842                         &phba->sli4_hba.lpfc_nvmet_sgl_list,
8843                         phba->sli4_hba.nvmet_xri_cnt);
8844                 if (unlikely(rc < 0)) {
8845                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8846                                         "3117 Error %d during nvmet "
8847                                         "sgl post\n", rc);
8848                         rc = -ENODEV;
8849                         goto out_destroy_queue;
8850                 }
8851                 phba->sli4_hba.nvmet_xri_cnt = rc;
8852
8853                 /* We allocate an iocbq for every receive context SGL.
8854                  * The additional allocation is for abort and ls handling.
8855                  */
8856                 cnt = phba->sli4_hba.nvmet_xri_cnt +
8857                         phba->sli4_hba.max_cfg_param.max_xri;
8858         } else {
8859                 /* update host common xri-sgl sizes and mappings */
8860                 rc = lpfc_sli4_io_sgl_update(phba);
8861                 if (unlikely(rc)) {
8862                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8863                                         "6082 Failed to update nvme-sgl size "
8864                                         "and mapping: %d\n", rc);
8865                         goto out_destroy_queue;
8866                 }
8867
8868                 /* register the allocated common sgl pool to the port */
8869                 rc = lpfc_sli4_repost_io_sgl_list(phba);
8870                 if (unlikely(rc)) {
8871                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8872                                         "6116 Error %d during nvme sgl post "
8873                                         "operation\n", rc);
8874                         /* Some NVME buffers were moved to abort nvme list */
8875                         /* A pci function reset will repost them */
8876                         rc = -ENODEV;
8877                         goto out_destroy_queue;
8878                 }
8879                 /* Each lpfc_io_buf job structure has an iocbq element.
8880                  * This cnt provides for abort, els, ct and ls requests.
8881                  */
8882                 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8883         }
8884
8885         if (!phba->sli.iocbq_lookup) {
8886                 /* Initialize and populate the iocb list per host */
8887                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8888                                 "2821 initialize iocb list with %d entries\n",
8889                                 cnt);
8890                 rc = lpfc_init_iocb_list(phba, cnt);
8891                 if (rc) {
8892                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8893                                         "1413 Failed to init iocb list.\n");
8894                         goto out_destroy_queue;
8895                 }
8896         }
8897
8898         if (phba->nvmet_support)
8899                 lpfc_nvmet_create_targetport(phba);
8900
8901         if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8902                 /* Post initial buffers to all RQs created */
8903                 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8904                         rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8905                         INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8906                         rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8907                         rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8908                         rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8909                         rqbp->buffer_count = 0;
8910
8911                         lpfc_post_rq_buffer(
8912                                 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8913                                 phba->sli4_hba.nvmet_mrq_data[i],
8914                                 phba->cfg_nvmet_mrq_post, i);
8915                 }
8916         }
8917
8918         /* Post the rpi header region to the device. */
8919         rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8920         if (unlikely(rc)) {
8921                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8922                                 "0393 Error %d during rpi post operation\n",
8923                                 rc);
8924                 rc = -ENODEV;
8925                 goto out_free_iocblist;
8926         }
8927         lpfc_sli4_node_prep(phba);
8928
8929         if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8930                 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8931                         /*
8932                          * The FC Port needs to register FCFI (index 0)
8933                          */
8934                         lpfc_reg_fcfi(phba, mboxq);
8935                         mboxq->vport = phba->pport;
8936                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8937                         if (rc != MBX_SUCCESS)
8938                                 goto out_unset_queue;
8939                         rc = 0;
8940                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8941                                                 &mboxq->u.mqe.un.reg_fcfi);
8942                 } else {
8943                         /* We are in NVME Target mode with MRQ > 1 */
8944
8945                         /* First register the FCFI */
8946                         lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8947                         mboxq->vport = phba->pport;
8948                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8949                         if (rc != MBX_SUCCESS)
8950                                 goto out_unset_queue;
8951                         rc = 0;
8952                         phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8953                                                 &mboxq->u.mqe.un.reg_fcfi_mrq);
8954
8955                         /* Next register the MRQs */
8956                         lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8957                         mboxq->vport = phba->pport;
8958                         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8959                         if (rc != MBX_SUCCESS)
8960                                 goto out_unset_queue;
8961                         rc = 0;
8962                 }
8963                 /* Check if the port is configured to be disabled */
8964                 lpfc_sli_read_link_ste(phba);
8965         }
8966
8967         /* Don't post more new bufs if repost already recovered
8968          * the nvme sgls.
8969          */
8970         if (phba->nvmet_support == 0) {
8971                 if (phba->sli4_hba.io_xri_cnt == 0) {
8972                         len = lpfc_new_io_buf(
8973                                               phba, phba->sli4_hba.io_xri_max);
8974                         if (len == 0) {
8975                                 rc = -ENOMEM;
8976                                 goto out_unset_queue;
8977                         }
8978
8979                         if (phba->cfg_xri_rebalancing)
8980                                 lpfc_create_multixri_pools(phba);
8981                 }
8982         } else {
8983                 phba->cfg_xri_rebalancing = 0;
8984         }
8985
8986         /* Allow asynchronous mailbox command to go through */
8987         spin_lock_irq(&phba->hbalock);
8988         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8989         spin_unlock_irq(&phba->hbalock);
8990
8991         /* Post receive buffers to the device */
8992         lpfc_sli4_rb_setup(phba);
8993
8994         /* Reset HBA FCF states after HBA reset */
8995         phba->fcf.fcf_flag = 0;
8996         phba->fcf.current_rec.flag = 0;
8997
8998         /* Start the ELS watchdog timer */
8999         mod_timer(&vport->els_tmofunc,
9000                   jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9001
9002         /* Start heart beat timer */
9003         mod_timer(&phba->hb_tmofunc,
9004                   jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9005         phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
9006         phba->last_completion_time = jiffies;
9007
9008         /* start eq_delay heartbeat */
9009         if (phba->cfg_auto_imax)
9010                 queue_delayed_work(phba->wq, &phba->eq_delay_work,
9011                                    msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9012
9013         /* start per phba idle_stat_delay heartbeat */
9014         lpfc_init_idle_stat_hb(phba);
9015
9016         /* Start error attention (ERATT) polling timer */
9017         mod_timer(&phba->eratt_poll,
9018                   jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9019
9020         /*
9021          * The port is ready, set the host's link state to LINK_DOWN
9022          * in preparation for link interrupts.
9023          */
9024         spin_lock_irq(&phba->hbalock);
9025         phba->link_state = LPFC_LINK_DOWN;
9026
9027         /* Check if physical ports are trunked */
9028         if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9029                 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9030         if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9031                 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9032         if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9033                 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9034         if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9035                 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9036         spin_unlock_irq(&phba->hbalock);
9037
9038         /* Arm the CQs and then EQs on device */
9039         lpfc_sli4_arm_cqeq_intr(phba);
9040
9041         /* Indicate device interrupt mode */
9042         phba->sli4_hba.intr_enable = 1;
9043
9044         /* Setup CMF after HBA is initialized */
9045         lpfc_cmf_setup(phba);
9046
9047         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
9048             (phba->hba_flag & LINK_DISABLED)) {
9049                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9050                                 "3103 Adapter Link is disabled.\n");
9051                 lpfc_down_link(phba, mboxq);
9052                 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9053                 if (rc != MBX_SUCCESS) {
9054                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9055                                         "3104 Adapter failed to issue "
9056                                         "DOWN_LINK mbox cmd, rc:x%x\n", rc);
9057                         goto out_io_buff_free;
9058                 }
9059         } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9060                 /* don't perform init_link on SLI4 FC port loopback test */
9061                 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9062                         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9063                         if (rc)
9064                                 goto out_io_buff_free;
9065                 }
9066         }
9067         mempool_free(mboxq, phba->mbox_mem_pool);
9068
9069         /* Enable RAS FW log support */
9070         lpfc_sli4_ras_setup(phba);
9071
9072         phba->hba_flag |= HBA_SETUP;
9073         return rc;
9074
9075 out_io_buff_free:
9076         /* Free allocated IO Buffers */
9077         lpfc_io_free(phba);
9078 out_unset_queue:
9079         /* Unset all the queues set up in this routine on error */
9080         lpfc_sli4_queue_unset(phba);
9081 out_free_iocblist:
9082         lpfc_free_iocb_list(phba);
9083 out_destroy_queue:
9084         lpfc_sli4_queue_destroy(phba);
9085 out_stop_timers:
9086         lpfc_stop_hba_timers(phba);
9087 out_free_mbox:
9088         mempool_free(mboxq, phba->mbox_mem_pool);
9089         return rc;
9090 }
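/*
 * A note on the unwind ladder ending this routine: each out_* label undoes
 * only what was set up before the failing step, so the labels must remain
 * in reverse order of setup.  A minimal sketch of the same pattern, with
 * hypothetical helper names used purely for illustration:
 *
 *	rc = setup_a(phba);
 *	if (rc)
 *		goto out;
 *	rc = setup_b(phba);
 *	if (rc)
 *		goto out_undo_a;
 *	return 0;
 * out_undo_a:
 *	undo_a(phba);
 * out:
 *	return rc;
 */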
9091
9092 /**
9093  * lpfc_mbox_timeout - Timeout call back function for mbox timer
9094  * @t: Context to fetch pointer to hba structure from.
9095  *
9096  * This is the callback function for the mailbox timer. The mailbox
9097  * timer is armed when a new mailbox command is issued and the timer
9098  * is deleted when the mailbox completes. The function is called by
9099  * the kernel timer code when a mailbox does not complete within the
9100  * expected time. This function wakes up the worker thread to
9101  * process the mailbox timeout and returns. All the processing is
9102  * done by the worker thread function lpfc_mbox_timeout_handler.
9103  **/
9104 void
9105 lpfc_mbox_timeout(struct timer_list *t)
9106 {
9107         struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
9108         unsigned long iflag;
9109         uint32_t tmo_posted;
9110
9111         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9112         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9113         if (!tmo_posted)
9114                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
9115         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9116
9117         if (!tmo_posted)
9118                 lpfc_worker_wake_up(phba);
9120 }
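/*
 * For context, the timer serviced here is armed on the issue path with a
 * per-command timeout; see lpfc_sli_issue_mbox_s3() below:
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 *
 * The callback itself only posts WORKER_MBOX_TMO and wakes the worker;
 * the actual recovery runs in process context in
 * lpfc_mbox_timeout_handler().
 */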
9121
9122 /**
9123  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9124  *                                    are pending
9125  * @phba: Pointer to HBA context object.
9126  *
9127  * This function checks if any mailbox completions are present on the mailbox
9128  * completion queue.
9129  **/
9130 static bool
9131 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9132 {
9134         uint32_t idx;
9135         struct lpfc_queue *mcq;
9136         struct lpfc_mcqe *mcqe;
9137         bool pending_completions = false;
9138         uint8_t qe_valid;
9139
9140         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9141                 return false;
9142
9143         /* Check for completions on mailbox completion queue */
9144
9145         mcq = phba->sli4_hba.mbx_cq;
9146         idx = mcq->hba_index;
9147         qe_valid = mcq->qe_valid;
9148         while (bf_get_le32(lpfc_cqe_valid,
9149                (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9150                 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9151                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9152                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9153                         pending_completions = true;
9154                         break;
9155                 }
9156                 idx = (idx + 1) % mcq->entry_count;
9157                 if (mcq->hba_index == idx)
9158                         break;
9159
9160                 /* if the index wrapped around, toggle the valid bit */
9161                 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9162                         qe_valid = (qe_valid) ? 0 : 1;
9163         }
9164         return pending_completions;
9166 }
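/*
 * The scan above follows the SLI4 phase-tagged CQE convention: the port
 * writes each entry with a valid bit equal to the queue's current phase
 * (qe_valid), and on parts that advertise cqav the expected phase toggles
 * each time the index wraps.  The walk stops at the first entry whose
 * valid bit does not match, i.e. an entry the port has not written yet.
 */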
9167
9168 /**
9169  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9170  *                                            that were missed.
9171  * @phba: Pointer to HBA context object.
9172  *
9173  * For SLI4, it is possible to miss an interrupt, so mbox completions
9174  * may be missed, causing erroneous mailbox timeouts. This function
9175  * checks to see if mbox completions are on the mailbox completion queue
9176  * and will process all the completions associated with the eq for the
9177  * mailbox completion queue.
9178  **/
9179 static bool
9180 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9181 {
9182         struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9183         uint32_t eqidx;
9184         struct lpfc_queue *fpeq = NULL;
9185         struct lpfc_queue *eq;
9186         bool mbox_pending;
9187
9188         if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9189                 return false;
9190
9191         /* Find the EQ associated with the mbox CQ */
9192         if (sli4_hba->hdwq) {
9193                 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9194                         eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9195                         if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9196                                 fpeq = eq;
9197                                 break;
9198                         }
9199                 }
9200         }
9201         if (!fpeq)
9202                 return false;
9203
9204         /* Turn off interrupts from this EQ */
9205
9206         sli4_hba->sli4_eq_clr_intr(fpeq);
9207
9208         /* Check to see if a mbox completion is pending */
9209
9210         mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9211
9212         /*
9213          * If a mbox completion is pending, process all the events on EQ
9214          * associated with the mbox completion queue (this could include
9215          * mailbox commands, async events, els commands, receive queue data
9216          * and fcp commands)
9217          */
9218
9219         if (mbox_pending)
9220                 /* process and rearm the EQ */
9221                 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
9222         else
9223                 /* Always clear and re-arm the EQ */
9224                 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9225
9226         return mbox_pending;
9228 }
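/*
 * Callers of note (both below): lpfc_mbox_timeout_handler() invokes this
 * before acting on a timeout, and lpfc_sli4_async_mbox_block() uses it to
 * confirm an apparently active mailbox is genuinely still outstanding.
 * Processing the entire EQ is intentional, since a missed interrupt may
 * have left other completions (ELS, FCP, async events) queued behind the
 * mailbox CQE.
 */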
9229
9230 /**
9231  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9232  * @phba: Pointer to HBA context object.
9233  *
9234  * This function is called from worker thread when a mailbox command times out.
9235  * The caller is not required to hold any locks. This function will reset the
9236  * HBA and recover all the pending commands.
9237  **/
9238 void
9239 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9240 {
9241         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9242         MAILBOX_t *mb = NULL;
9243
9244         struct lpfc_sli *psli = &phba->sli;
9245
9246         /* If the mailbox completed, process the completion */
9247         lpfc_sli4_process_missed_mbox_completions(phba);
9248
9249         if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9250                 return;
9251
9252         if (pmbox != NULL)
9253                 mb = &pmbox->u.mb;
9254         /* Check the pmbox pointer first.  There is a race condition
9255          * between the mbox timeout handler getting executed in the
9256          * worklist and the mailbox actually completing. When this
9257          * race condition occurs, the mbox_active will be NULL.
9258          */
9259         spin_lock_irq(&phba->hbalock);
9260         if (pmbox == NULL) {
9261                 lpfc_printf_log(phba, KERN_WARNING,
9262                                 LOG_MBOX | LOG_SLI,
9263                                 "0353 Active Mailbox cleared - mailbox timeout "
9264                                 "exiting\n");
9265                 spin_unlock_irq(&phba->hbalock);
9266                 return;
9267         }
9268
9269         /* Mbox cmd <mbxCommand> timeout */
9270         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9271                         "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9272                         mb->mbxCommand,
9273                         phba->pport->port_state,
9274                         phba->sli.sli_flag,
9275                         phba->sli.mbox_active);
9276         spin_unlock_irq(&phba->hbalock);
9277
9278         /* Setting state unknown so lpfc_sli_abort_iocb_ring
9279          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9280          * it to fail all outstanding SCSI IO.
9281          */
9282         spin_lock_irq(&phba->pport->work_port_lock);
9283         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9284         spin_unlock_irq(&phba->pport->work_port_lock);
9285         spin_lock_irq(&phba->hbalock);
9286         phba->link_state = LPFC_LINK_UNKNOWN;
9287         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9288         spin_unlock_irq(&phba->hbalock);
9289
9290         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9291                         "0345 Resetting board due to mailbox timeout\n");
9292
9293         /* Reset the HBA device */
9294         lpfc_reset_hba(phba);
9295 }
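/*
 * Summary of the recovery path above (descriptive only): a confirmed
 * timeout clears WORKER_MBOX_TMO, marks the link state unknown and drops
 * LPFC_SLI_ACTIVE so outstanding I/O fails fast, and then resets the HBA
 * via lpfc_reset_hba(), which recovers the pending commands.
 */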
9296
9297 /**
9298  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9299  * @phba: Pointer to HBA context object.
9300  * @pmbox: Pointer to mailbox object.
9301  * @flag: Flag indicating how the mailbox needs to be processed.
9302  *
9303  * This function is called by discovery code and HBA management code
9304  * to submit a mailbox command to firmware with SLI-3 interface spec. This
9305  * function gets the hbalock to protect the data structures.
9306  * The mailbox command can be submitted in polling mode, in which case
9307  * this function will wait in a polling loop for the completion of the
9308  * mailbox.
9309  * If the mailbox is submitted in no_wait mode (not polling), the
9310  * function will submit the command and return immediately without waiting
9311  * for the mailbox completion. The no_wait mode is supported only when the
9312  * HBA is in SLI2/SLI3 mode with interrupts enabled.
9313  * The SLI interface allows only one mailbox pending at a time. If the
9314  * mailbox is issued in polling mode and there is already a mailbox
9315  * pending, then the function will return an error. If the mailbox is issued
9316  * in NO_WAIT mode and there is a mailbox pending already, the function
9317  * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
9318  * The SLI layer owns the mailbox object until the completion of the mailbox
9319  * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
9320  * return codes the caller owns the mailbox command after the return of
9321  * the function.
9322  **/
9323 static int
9324 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9325                        uint32_t flag)
9326 {
9327         MAILBOX_t *mbx;
9328         struct lpfc_sli *psli = &phba->sli;
9329         uint32_t status, evtctr;
9330         uint32_t ha_copy, hc_copy;
9331         int i;
9332         unsigned long timeout;
9333         unsigned long drvr_flag = 0;
9334         uint32_t word0, ldata;
9335         void __iomem *to_slim;
9336         int processing_queue = 0;
9337
9338         spin_lock_irqsave(&phba->hbalock, drvr_flag);
9339         if (!pmbox) {
9340                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9341                 /* processing mbox queue from intr_handler */
9342                 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9343                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9344                         return MBX_SUCCESS;
9345                 }
9346                 processing_queue = 1;
9347                 pmbox = lpfc_mbox_get(phba);
9348                 if (!pmbox) {
9349                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9350                         return MBX_SUCCESS;
9351                 }
9352         }
9353
9354         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9355                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9356                 if (!pmbox->vport) {
9357                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9358                         lpfc_printf_log(phba, KERN_ERR,
9359                                         LOG_MBOX | LOG_VPORT,
9360                                         "1806 Mbox x%x failed. No vport\n",
9361                                         pmbox->u.mb.mbxCommand);
9362                         dump_stack();
9363                         goto out_not_finished;
9364                 }
9365         }
9366
9367         /* If the PCI channel is in offline state, do not post mbox. */
9368         if (unlikely(pci_channel_offline(phba->pcidev))) {
9369                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9370                 goto out_not_finished;
9371         }
9372
9373         /* If the HBA has a deferred error attention, fail the mailbox command. */
9374         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9375                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9376                 goto out_not_finished;
9377         }
9378
9379         psli = &phba->sli;
9380
9381         mbx = &pmbox->u.mb;
9382         status = MBX_SUCCESS;
9383
9384         if (phba->link_state == LPFC_HBA_ERROR) {
9385                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9386
9387                 /* Mbox command <mbxCommand> cannot issue */
9388                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9389                                 "(%d):0311 Mailbox command x%x cannot "
9390                                 "issue Data: x%x x%x\n",
9391                                 pmbox->vport ? pmbox->vport->vpi : 0,
9392                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9393                 goto out_not_finished;
9394         }
9395
9396         if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9397                 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9398                         !(hc_copy & HC_MBINT_ENA)) {
9399                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9400                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9401                                 "(%d):2528 Mailbox command x%x cannot "
9402                                 "issue Data: x%x x%x\n",
9403                                 pmbox->vport ? pmbox->vport->vpi : 0,
9404                                 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9405                         goto out_not_finished;
9406                 }
9407         }
9408
9409         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9410                 /* Polling for a mbox command when another one is already active
9411                  * is not allowed in SLI. Also, the driver must have established
9412                  * SLI2 mode to queue and process multiple mbox commands.
9413                  */
9414
9415                 if (flag & MBX_POLL) {
9416                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9417
9418                         /* Mbox command <mbxCommand> cannot issue */
9419                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9420                                         "(%d):2529 Mailbox command x%x "
9421                                         "cannot issue Data: x%x x%x\n",
9422                                         pmbox->vport ? pmbox->vport->vpi : 0,
9423                                         pmbox->u.mb.mbxCommand,
9424                                         psli->sli_flag, flag);
9425                         goto out_not_finished;
9426                 }
9427
9428                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9429                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9430                         /* Mbox command <mbxCommand> cannot issue */
9431                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9432                                         "(%d):2530 Mailbox command x%x "
9433                                         "cannot issue Data: x%x x%x\n",
9434                                         pmbox->vport ? pmbox->vport->vpi : 0,
9435                                         pmbox->u.mb.mbxCommand,
9436                                         psli->sli_flag, flag);
9437                         goto out_not_finished;
9438                 }
9439
9440                 /* Another mailbox command is still being processed, queue this
9441                  * command to be processed later.
9442                  */
9443                 lpfc_mbox_put(phba, pmbox);
9444
9445                 /* Mbox cmd issue - BUSY */
9446                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9447                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
9448                                 "x%x x%x x%x x%x\n",
9449                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9450                                 mbx->mbxCommand,
9451                                 phba->pport ? phba->pport->port_state : 0xff,
9452                                 psli->sli_flag, flag);
9453
9454                 psli->slistat.mbox_busy++;
9455                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9456
9457                 if (pmbox->vport) {
9458                         lpfc_debugfs_disc_trc(pmbox->vport,
9459                                 LPFC_DISC_TRC_MBOX_VPORT,
9460                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9461                                 (uint32_t)mbx->mbxCommand,
9462                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9463                 } else {
9465                         lpfc_debugfs_disc_trc(phba->pport,
9466                                 LPFC_DISC_TRC_MBOX,
9467                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
9468                                 (uint32_t)mbx->mbxCommand,
9469                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9470                 }
9471
9472                 return MBX_BUSY;
9473         }
9474
9475         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9476
9477         /* If we are not polling, we MUST be in SLI2 mode */
9478         if (flag != MBX_POLL) {
9479                 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9480                     (mbx->mbxCommand != MBX_KILL_BOARD)) {
9481                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9482                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9483                         /* Mbox command <mbxCommand> cannot issue */
9484                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9485                                         "(%d):2531 Mailbox command x%x "
9486                                         "cannot issue Data: x%x x%x\n",
9487                                         pmbox->vport ? pmbox->vport->vpi : 0,
9488                                         pmbox->u.mb.mbxCommand,
9489                                         psli->sli_flag, flag);
9490                         goto out_not_finished;
9491                 }
9492                 /* timeout active mbox command */
9493                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9494                                            1000);
9495                 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9496         }
9497
9498         /* Mailbox cmd <cmd> issue */
9499         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9500                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9501                         "x%x\n",
9502                         pmbox->vport ? pmbox->vport->vpi : 0,
9503                         mbx->mbxCommand,
9504                         phba->pport ? phba->pport->port_state : 0xff,
9505                         psli->sli_flag, flag);
9506
9507         if (mbx->mbxCommand != MBX_HEARTBEAT) {
9508                 if (pmbox->vport) {
9509                         lpfc_debugfs_disc_trc(pmbox->vport,
9510                                 LPFC_DISC_TRC_MBOX_VPORT,
9511                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9512                                 (uint32_t)mbx->mbxCommand,
9513                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9514                 } else {
9516                         lpfc_debugfs_disc_trc(phba->pport,
9517                                 LPFC_DISC_TRC_MBOX,
9518                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
9519                                 (uint32_t)mbx->mbxCommand,
9520                                 mbx->un.varWords[0], mbx->un.varWords[1]);
9521                 }
9522         }
9523
9524         psli->slistat.mbox_cmd++;
9525         evtctr = psli->slistat.mbox_event;
9526
9527         /* next set own bit for the adapter and copy over command word */
9528         mbx->mbxOwner = OWN_CHIP;
9529
9530         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9531                 /* Populate mbox extension offset word. */
9532                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9533                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9534                                 = (uint8_t *)phba->mbox_ext
9535                                   - (uint8_t *)phba->mbox;
9536                 }
9537
9538                 /* Copy the mailbox extension data */
9539                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9540                         lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9541                                               (uint8_t *)phba->mbox_ext,
9542                                               pmbox->in_ext_byte_len);
9543                 }
9544                 /* Copy command data to host SLIM area */
9545                 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9546         } else {
9547                 /* Populate mbox extension offset word. */
9548                 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9549                         *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9550                                 = MAILBOX_HBA_EXT_OFFSET;
9551
9552                 /* Copy the mailbox extension data */
9553                 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9554                         lpfc_memcpy_to_slim(phba->MBslimaddr +
9555                                 MAILBOX_HBA_EXT_OFFSET,
9556                                 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9557
9558                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9559                         /* copy command data into host mbox for cmpl */
9560                         lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9561                                               MAILBOX_CMD_SIZE);
9562
9563                 /* First copy mbox command data to HBA SLIM, skip past the
9564                  * first word */
9565                 to_slim = phba->MBslimaddr + sizeof(uint32_t);
9566                 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9567                             MAILBOX_CMD_SIZE - sizeof(uint32_t));
9568
9569                 /* Next copy over first word, with mbxOwner set */
9570                 ldata = *((uint32_t *)mbx);
9571                 to_slim = phba->MBslimaddr;
9572                 writel(ldata, to_slim);
9573                 readl(to_slim); /* flush */
9574
9575                 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9576                         /* switch over to host mailbox */
9577                         psli->sli_flag |= LPFC_SLI_ACTIVE;
9578         }
9579
9580         wmb();
9581
9582         switch (flag) {
9583         case MBX_NOWAIT:
9584                 /* Set up reference to mailbox command */
9585                 psli->mbox_active = pmbox;
9586                 /* Interrupt board to do it */
9587                 writel(CA_MBATT, phba->CAregaddr);
9588                 readl(phba->CAregaddr); /* flush */
9589                 /* Don't wait for it to finish, just return */
9590                 break;
9591
9592         case MBX_POLL:
9593                 /* Set up null reference to mailbox command */
9594                 psli->mbox_active = NULL;
9595                 /* Interrupt board to do it */
9596                 writel(CA_MBATT, phba->CAregaddr);
9597                 readl(phba->CAregaddr); /* flush */
9598
9599                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9600                         /* First read mbox status word */
9601                         word0 = *((uint32_t *)phba->mbox);
9602                         word0 = le32_to_cpu(word0);
9603                 } else {
9604                         /* First read mbox status word */
9605                         if (lpfc_readl(phba->MBslimaddr, &word0)) {
9606                                 spin_unlock_irqrestore(&phba->hbalock,
9607                                                        drvr_flag);
9608                                 goto out_not_finished;
9609                         }
9610                 }
9611
9612                 /* Read the HBA Host Attention Register */
9613                 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9614                         spin_unlock_irqrestore(&phba->hbalock,
9615                                                        drvr_flag);
9616                         goto out_not_finished;
9617                 }
9618                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9619                                                         1000) + jiffies;
9620                 i = 0;
9621                 /* Wait for command to complete */
9622                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9623                        (!(ha_copy & HA_MBATT) &&
9624                         (phba->link_state > LPFC_WARM_START))) {
9625                         if (time_after(jiffies, timeout)) {
9626                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9627                                 spin_unlock_irqrestore(&phba->hbalock,
9628                                                        drvr_flag);
9629                                 goto out_not_finished;
9630                         }
9631
9632                         /* Check if we took a mbox interrupt while we
9633                          * were polling */
9634                         if (((word0 & OWN_CHIP) != OWN_CHIP)
9635                             && (evtctr != psli->slistat.mbox_event))
9636                                 break;
9637
9638                         if (i++ > 10) {
9639                                 spin_unlock_irqrestore(&phba->hbalock,
9640                                                        drvr_flag);
9641                                 msleep(1);
9642                                 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9643                         }
9644
9645                         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9646                                 /* First copy command data */
9647                                 word0 = *((uint32_t *)phba->mbox);
9648                                 word0 = le32_to_cpu(word0);
9649                                 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9650                                         MAILBOX_t *slimmb;
9651                                         uint32_t slimword0;
9652                                         /* Check real SLIM for any errors */
9653                                         slimword0 = readl(phba->MBslimaddr);
9654                                         slimmb = (MAILBOX_t *)&slimword0;
9655                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9656                                             && slimmb->mbxStatus) {
9657                                                 psli->sli_flag &=
9658                                                     ~LPFC_SLI_ACTIVE;
9659                                                 word0 = slimword0;
9660                                         }
9661                                 }
9662                         } else {
9663                                 /* First copy command data */
9664                                 word0 = readl(phba->MBslimaddr);
9665                         }
9666                         /* Read the HBA Host Attention Register */
9667                         if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9668                                 spin_unlock_irqrestore(&phba->hbalock,
9669                                                        drvr_flag);
9670                                 goto out_not_finished;
9671                         }
9672                 }
9673
9674                 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9675                         /* copy results back to user */
9676                         lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9677                                                 MAILBOX_CMD_SIZE);
9678                         /* Copy the mailbox extension data */
9679                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9680                                 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9681                                                       pmbox->ctx_buf,
9682                                                       pmbox->out_ext_byte_len);
9683                         }
9684                 } else {
9685                         /* First copy command data */
9686                         lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9687                                                 MAILBOX_CMD_SIZE);
9688                         /* Copy the mailbox extension data */
9689                         if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9690                                 lpfc_memcpy_from_slim(
9691                                         pmbox->ctx_buf,
9692                                         phba->MBslimaddr +
9693                                         MAILBOX_HBA_EXT_OFFSET,
9694                                         pmbox->out_ext_byte_len);
9695                         }
9696                 }
9697
9698                 writel(HA_MBATT, phba->HAregaddr);
9699                 readl(phba->HAregaddr); /* flush */
9700
9701                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9702                 status = mbx->mbxStatus;
9703         }
9704
9705         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9706         return status;
9707
9708 out_not_finished:
9709         if (processing_queue) {
9710                 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9711                 lpfc_mbox_cmpl_put(phba, pmbox);
9712         }
9713         return MBX_NOT_FINISHED;
9714 }
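/*
 * Usage sketch for the rules documented above (hedged; callers normally go
 * through the lpfc_sli_issue_mbox() wrapper, which dispatches to this s3
 * routine or its s4 counterpart).  A typical polled command:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	...
 *	mempool_free(pmb, phba->mbox_mem_pool);
 *
 * With MBX_NOWAIT, ownership stays with the SLI layer on MBX_BUSY or
 * MBX_SUCCESS until the mbox_cmpl callback runs.
 */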
9715
9716 /**
9717  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9718  * @phba: Pointer to HBA context object.
9719  *
9720  * The function blocks the posting of SLI4 asynchronous mailbox commands from
9721  * the driver internal pending mailbox queue. It will then try to wait out the
9722  * possible outstanding mailbox command before returning.
9723  *
9724  * Returns:
9725  *      0 - the outstanding mailbox command completed; otherwise, the wait for
9726  *      the outstanding mailbox command timed out.
9727  **/
9728 static int
9729 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9730 {
9731         struct lpfc_sli *psli = &phba->sli;
9732         LPFC_MBOXQ_t *mboxq;
9733         int rc = 0;
9734         unsigned long timeout = 0;
9735         u32 sli_flag;
9736         u8 cmd, subsys, opcode;
9737
9738         /* Mark the asynchronous mailbox command posting as blocked */
9739         spin_lock_irq(&phba->hbalock);
9740         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9741         /* Determine how long we might wait for the active mailbox
9742          * command to be gracefully completed by firmware.
9743          */
9744         if (phba->sli.mbox_active)
9745                 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9746                                                 phba->sli.mbox_active) *
9747                                                 1000) + jiffies;
9748         spin_unlock_irq(&phba->hbalock);
9749
9750         /* Make sure the mailbox is really active */
9751         if (timeout)
9752                 lpfc_sli4_process_missed_mbox_completions(phba);
9753
9754         /* Wait for the outstanding mailbox command to complete */
9755         while (phba->sli.mbox_active) {
9756                 /* Check active mailbox complete status every 2ms */
9757                 msleep(2);
9758                 if (time_after(jiffies, timeout)) {
9759                         /* Timeout, mark the outstanding cmd not complete */
9760
9761                         /* Sanity check sli.mbox_active has not completed or
9762                          * been cancelled from another context during the last
9763                          * 2ms sleep, so take hbalock to be sure before logging.
9764                          */
9765                         spin_lock_irq(&phba->hbalock);
9766                         if (phba->sli.mbox_active) {
9767                                 mboxq = phba->sli.mbox_active;
9768                                 cmd = mboxq->u.mb.mbxCommand;
9769                                 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9770                                                                          mboxq);
9771                                 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9772                                                                          mboxq);
9773                                 sli_flag = psli->sli_flag;
9774                                 spin_unlock_irq(&phba->hbalock);
9775                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9776                                                 "2352 Mailbox command x%x "
9777                                                 "(x%x/x%x) sli_flag x%x could "
9778                                                 "not complete\n",
9779                                                 cmd, subsys, opcode,
9780                                                 sli_flag);
9781                         } else {
9782                                 spin_unlock_irq(&phba->hbalock);
9783                         }
9784
9785                         rc = 1;
9786                         break;
9787                 }
9788         }
9789
9790         /* Cannot cleanly block async mailbox command posting, so fail it */
9791         if (rc) {
9792                 spin_lock_irq(&phba->hbalock);
9793                 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9794                 spin_unlock_irq(&phba->hbalock);
9795         }
9796         return rc;
9797 }
9798
9799 /**
9800  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9801  * @phba: Pointer to HBA context object.
9802  *
9803  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9804  * commands from the driver internal pending mailbox queue. It makes sure
9805  * that there is no outstanding mailbox command before resuming posting
9806  * asynchronous mailbox commands. If, for any reason, there is an outstanding
9807  * mailbox command, it will try to wait it out before resuming asynchronous
9808  * mailbox command posting.
9809  **/
9810 static void
9811 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9812 {
9813         struct lpfc_sli *psli = &phba->sli;
9814
9815         spin_lock_irq(&phba->hbalock);
9816         if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9817                 /* Asynchronous mailbox posting is not blocked, do nothing */
9818                 spin_unlock_irq(&phba->hbalock);
9819                 return;
9820         }
9821
9822         /* The outstanding synchronous mailbox command is guaranteed to be
9823          * done, either successful or timed out; after a timeout the
9824          * outstanding mailbox command is always removed, so just unblock
9825          * posting of async mailbox commands and resume.
9826          */
9827         psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9828         spin_unlock_irq(&phba->hbalock);
9829
9830         /* wake up worker thread to post asynchronous mailbox command */
9831         lpfc_worker_wake_up(phba);
9832 }
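/*
 * The block/unblock pair above brackets synchronous bootstrap-mailbox use
 * while interrupts are enabled; see lpfc_sli_issue_mbox_s4() below:
 *
 *	rc = lpfc_sli4_async_mbox_block(phba);
 *	if (!rc) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		...
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */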
9833
9834 /**
9835  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9836  * @phba: Pointer to HBA context object.
9837  * @mboxq: Pointer to mailbox object.
9838  *
9839  * The function waits for the bootstrap mailbox register ready bit from
9840  * the port for twice the regular mailbox command timeout value.
9841  * Returns:
9842  *      0 - no timeout on waiting for bootstrap mailbox register ready.
9843  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9844  *                     is in an unrecoverable state.
9845  **/
9846 static int
9847 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9848 {
9849         uint32_t db_ready;
9850         unsigned long timeout;
9851         struct lpfc_register bmbx_reg;
9852         struct lpfc_register portstat_reg = {-1};
9853
9854         /* Sanity check - there is no point in waiting if the port is in an
9855          * unrecoverable state.
9856          */
9857         if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9858             LPFC_SLI_INTF_IF_TYPE_2) {
9859                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9860                                &portstat_reg.word0) ||
9861                     lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9862                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9863                                         "3858 Skipping bmbx ready because "
9864                                         "Port Status x%x\n",
9865                                         portstat_reg.word0);
9866                         return MBXERR_ERROR;
9867                 }
9868         }
9869
9870         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9871                                    * 1000) + jiffies;
9872
9873         do {
9874                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9875                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9876                 if (!db_ready)
9877                         mdelay(2);
9878
9879                 if (time_after(jiffies, timeout))
9880                         return MBXERR_ERROR;
9881         } while (!db_ready);
9882
9883         return 0;
9884 }
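/*
 * For orientation: lpfc_sli4_post_sync_mbox() below polls this ready bit at
 * three points in the bootstrap protocol - before first touching the BMBX
 * register, after posting the high half of the mailbox DMA address, and
 * after posting the low half, which also kicks off command execution.
 */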
9885
9886 /**
9887  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9888  * @phba: Pointer to HBA context object.
9889  * @mboxq: Pointer to mailbox object.
9890  *
9891  * The function posts a mailbox to the port.  The mailbox is expected
9892  * to be completely filled in and ready for the port to operate on it.
9893  * This routine executes a synchronous completion operation on the
9894  * mailbox by polling for its completion.
9895  *
9896  * The caller must not be holding any locks when calling this routine.
9897  *
9898  * Returns:
9899  *      MBX_SUCCESS - mailbox posted successfully
9900  *      Any of the MBX error values.
9901  **/
9902 static int
9903 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9904 {
9905         int rc = MBX_SUCCESS;
9906         unsigned long iflag;
9907         uint32_t mcqe_status;
9908         uint32_t mbx_cmnd;
9909         struct lpfc_sli *psli = &phba->sli;
9910         struct lpfc_mqe *mb = &mboxq->u.mqe;
9911         struct lpfc_bmbx_create *mbox_rgn;
9912         struct dma_address *dma_address;
9913
9914         /*
9915          * Only one mailbox can be active to the bootstrap mailbox region
9916          * at a time and there is no queueing provided.
9917          */
9918         spin_lock_irqsave(&phba->hbalock, iflag);
9919         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9920                 spin_unlock_irqrestore(&phba->hbalock, iflag);
9921                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9922                                 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9923                                 "cannot issue Data: x%x x%x\n",
9924                                 mboxq->vport ? mboxq->vport->vpi : 0,
9925                                 mboxq->u.mb.mbxCommand,
9926                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9927                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9928                                 psli->sli_flag, MBX_POLL);
9929                 return MBXERR_ERROR;
9930         }
9931         /* The driver grabs the token and owns it until release */
9932         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9933         phba->sli.mbox_active = mboxq;
9934         spin_unlock_irqrestore(&phba->hbalock, iflag);
9935
9936         /* wait for bootstrap mbox register readiness */
9937         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9938         if (rc)
9939                 goto exit;
9940         /*
9941          * Initialize the bootstrap memory region to avoid stale data areas
9942          * in the mailbox post.  Then copy the caller's mailbox contents to
9943          * the bmbx mailbox region.
9944          */
9945         mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9946         memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9947         lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9948                                sizeof(struct lpfc_mqe));
9949
9950         /* Post the high mailbox dma address to the port and wait for ready. */
9951         dma_address = &phba->sli4_hba.bmbx.dma_address;
9952         writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9953
9954         /* wait for bootstrap mbox register hi-address write to complete */
9955         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9956         if (rc)
9957                 goto exit;
9958
9959         /* Post the low mailbox dma address to the port. */
9960         writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9961
9962         /* wait for bootstrap mbox register low-address write to complete */
9963         rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9964         if (rc)
9965                 goto exit;
9966
9967         /*
9968          * Read the CQ to ensure the mailbox has completed.
9969          * If so, update the mailbox status so that the upper layers
9970          * can complete the request normally.
9971          */
9972         lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9973                                sizeof(struct lpfc_mqe));
9974         mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9975         lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9976                                sizeof(struct lpfc_mcqe));
9977         mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9978         /*
9979          * When the CQE status indicates a failure and the mailbox status
9980          * indicates success then copy the CQE status into the mailbox status
9981          * (and prefix it with x4000).
9982          */
9983         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9984                 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9985                         bf_set(lpfc_mqe_status, mb,
9986                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
9987                 rc = MBXERR_ERROR;
9988         } else
9989                 lpfc_sli4_swap_str(phba, mboxq);
9990
9991         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9992                         "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9993                         "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9994                         " x%x x%x CQ: x%x x%x x%x x%x\n",
9995                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9996                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9997                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9998                         bf_get(lpfc_mqe_status, mb),
9999                         mb->un.mb_words[0], mb->un.mb_words[1],
10000                         mb->un.mb_words[2], mb->un.mb_words[3],
10001                         mb->un.mb_words[4], mb->un.mb_words[5],
10002                         mb->un.mb_words[6], mb->un.mb_words[7],
10003                         mb->un.mb_words[8], mb->un.mb_words[9],
10004                         mb->un.mb_words[10], mb->un.mb_words[11],
10005                         mb->un.mb_words[12], mboxq->mcqe.word0,
10006                         mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
10007                         mboxq->mcqe.trailer);
10008 exit:
10009         /* We are holding the token; no lock is needed for the release */
10010         spin_lock_irqsave(&phba->hbalock, iflag);
10011         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10012         phba->sli.mbox_active = NULL;
10013         spin_unlock_irqrestore(&phba->hbalock, iflag);
10014         return rc;
10015 }
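/*
 * Worked example of the status merge above (illustrative value): if the MQE
 * reports MBX_SUCCESS but the MCQE carries status 0x2, the mailbox status
 * is rewritten to (LPFC_MBX_ERROR_RANGE | 0x2), i.e. 0x4002, so upper
 * layers observe a single nonzero failure code.
 */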
10016
10017 /**
10018  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10019  * @phba: Pointer to HBA context object.
10020  * @mboxq: Pointer to mailbox object.
10021  * @flag: Flag indicating how the mailbox needs to be processed.
10022  *
10023  * This function is called by discovery code and HBA management code to submit
10024  * a mailbox command to firmware with SLI-4 interface spec.
10025  *
10026  * Return codes: the caller owns the mailbox command after the return of
10027  * function.
10028  **/
10029 static int
10030 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10031                        uint32_t flag)
10032 {
10033         struct lpfc_sli *psli = &phba->sli;
10034         unsigned long iflags;
10035         int rc;
10036
10037         /* dump the issue mailbox command if dump capture is set up */
10038         lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10039
10040         rc = lpfc_mbox_dev_check(phba);
10041         if (unlikely(rc)) {
10042                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10043                                 "(%d):2544 Mailbox command x%x (x%x/x%x) "
10044                                 "cannot issue Data: x%x x%x\n",
10045                                 mboxq->vport ? mboxq->vport->vpi : 0,
10046                                 mboxq->u.mb.mbxCommand,
10047                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10048                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10049                                 psli->sli_flag, flag);
10050                 goto out_not_finished;
10051         }
10052
10053         /* Detect polling mode and jump to a handler */
10054         if (!phba->sli4_hba.intr_enable) {
10055                 if (flag == MBX_POLL)
10056                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10057                 else
10058                         rc = -EIO;
10059                 if (rc != MBX_SUCCESS)
10060                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10061                                         "(%d):2541 Mailbox command x%x "
10062                                         "(x%x/x%x) failure: "
10063                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
10064                                         "Data: x%x x%x\n",
10065                                         mboxq->vport ? mboxq->vport->vpi : 0,
10066                                         mboxq->u.mb.mbxCommand,
10067                                         lpfc_sli_config_mbox_subsys_get(phba,
10068                                                                         mboxq),
10069                                         lpfc_sli_config_mbox_opcode_get(phba,
10070                                                                         mboxq),
10071                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10072                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10073                                         bf_get(lpfc_mcqe_ext_status,
10074                                                &mboxq->mcqe),
10075                                         psli->sli_flag, flag);
10076                 return rc;
10077         } else if (flag == MBX_POLL) {
10078                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10079                                 "(%d):2542 Try to issue mailbox command "
10080                                 "x%x (x%x/x%x) synchronously ahead of async "
10081                                 "mailbox command queue: x%x x%x\n",
10082                                 mboxq->vport ? mboxq->vport->vpi : 0,
10083                                 mboxq->u.mb.mbxCommand,
10084                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10085                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10086                                 psli->sli_flag, flag);
10087                 /* Try to block the asynchronous mailbox posting */
10088                 rc = lpfc_sli4_async_mbox_block(phba);
10089                 if (!rc) {
10090                         /* Successfully blocked, now issue sync mbox cmd */
10091                         rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10092                         if (rc != MBX_SUCCESS)
10093                                 lpfc_printf_log(phba, KERN_WARNING,
10094                                         LOG_MBOX | LOG_SLI,
10095                                         "(%d):2597 Sync Mailbox command "
10096                                         "x%x (x%x/x%x) failure: "
10097                                         "mqe_sta: x%x mcqe_sta: x%x/x%x "
10098                                         "Data: x%x x%x\n",
10099                                         mboxq->vport ? mboxq->vport->vpi : 0,
10100                                         mboxq->u.mb.mbxCommand,
10101                                         lpfc_sli_config_mbox_subsys_get(phba,
10102                                                                         mboxq),
10103                                         lpfc_sli_config_mbox_opcode_get(phba,
10104                                                                         mboxq),
10105                                         bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10106                                         bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10107                                         bf_get(lpfc_mcqe_ext_status,
10108                                                &mboxq->mcqe),
10109                                         psli->sli_flag, flag);
10110                         /* Unblock the async mailbox posting afterward */
10111                         lpfc_sli4_async_mbox_unblock(phba);
10112                 }
10113                 return rc;
10114         }
10115
10116         /* Now, interrupt mode asynchronous mailbox command */
10117         rc = lpfc_mbox_cmd_check(phba, mboxq);
10118         if (rc) {
10119                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10120                                 "(%d):2543 Mailbox command x%x (x%x/x%x) "
10121                                 "cannot issue Data: x%x x%x\n",
10122                                 mboxq->vport ? mboxq->vport->vpi : 0,
10123                                 mboxq->u.mb.mbxCommand,
10124                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10125                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10126                                 psli->sli_flag, flag);
10127                 goto out_not_finished;
10128         }
10129
10130         /* Put the mailbox command into the driver's internal FIFO */
10131         psli->slistat.mbox_busy++;
10132         spin_lock_irqsave(&phba->hbalock, iflags);
10133         lpfc_mbox_put(phba, mboxq);
10134         spin_unlock_irqrestore(&phba->hbalock, iflags);
10135         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10136                         "(%d):0354 Mbox cmd issue - Enqueue Data: "
10137                         "x%x (x%x/x%x) x%x x%x x%x\n",
10138                         mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10139                         bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10140                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10141                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10142                         phba->pport->port_state,
10143                         psli->sli_flag, MBX_NOWAIT);
10144         /* Wake up worker thread to transport the mailbox command from the queue head */
10145         lpfc_worker_wake_up(phba);
10146
10147         return MBX_BUSY;
10148
10149 out_not_finished:
10150         return MBX_NOT_FINISHED;
10151 }
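
/*
 * Dispatch summary for lpfc_sli_issue_mbox_s4, informally restated from the
 * code above:
 *
 *   intr_enable  flag        path taken
 *   -----------  ----------  ---------------------------------------------
 *   false        MBX_POLL    lpfc_sli4_post_sync_mbox() directly
 *   false        other       -EIO (async posting requires interrupts)
 *   true         MBX_POLL    block async queue, post sync mbox, unblock
 *   true         MBX_NOWAIT  enqueue to the internal FIFO, wake the worker
 *                            thread, return MBX_BUSY
 */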
10152
10153 /**
10154  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10155  * @phba: Pointer to HBA context object.
10156  *
10157  * This function is called by the worker thread to send a mailbox command
10158  * to the SLI4 HBA firmware.
10159  *
10160  **/
10161 int
10162 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10163 {
10164         struct lpfc_sli *psli = &phba->sli;
10165         LPFC_MBOXQ_t *mboxq;
10166         int rc = MBX_SUCCESS;
10167         unsigned long iflags;
10168         struct lpfc_mqe *mqe;
10169         uint32_t mbx_cmnd;
10170
10171         /* Check interrupt mode before posting the async mailbox command */
10172         if (unlikely(!phba->sli4_hba.intr_enable))
10173                 return MBX_NOT_FINISHED;
10174
10175         /* Check for mailbox command service token */
10176         spin_lock_irqsave(&phba->hbalock, iflags);
10177         if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10178                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10179                 return MBX_NOT_FINISHED;
10180         }
10181         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10182                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10183                 return MBX_NOT_FINISHED;
10184         }
10185         if (unlikely(phba->sli.mbox_active)) {
10186                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10187                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10188                                 "0384 There is pending active mailbox cmd\n");
10189                 return MBX_NOT_FINISHED;
10190         }
10191         /* Take the mailbox command service token */
10192         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10193
10194         /* Get the next mailbox command from head of queue */
10195         mboxq = lpfc_mbox_get(phba);
10196
10197         /* If no mailbox command is waiting to be posted, we're done */
10198         if (!mboxq) {
10199                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10200                 spin_unlock_irqrestore(&phba->hbalock, iflags);
10201                 return MBX_SUCCESS;
10202         }
10203         phba->sli.mbox_active = mboxq;
10204         spin_unlock_irqrestore(&phba->hbalock, iflags);
10205
10206         /* Check device readiness for posting mailbox command */
10207         rc = lpfc_mbox_dev_check(phba);
10208         if (unlikely(rc))
10209                 /* The driver cleanup routine will clean up the pending mailbox */
10210                 goto out_not_finished;
10211
10212         /* Prepare the mbox command to be posted */
10213         mqe = &mboxq->u.mqe;
10214         mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10215
10216         /* Start timer for the mbox_tmo and log some mailbox post messages */
10217         mod_timer(&psli->mbox_tmo, (jiffies +
10218                   msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
10219
10220         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10221                         "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10222                         "x%x x%x\n",
10223                         mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10224                         lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10225                         lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10226                         phba->pport->port_state, psli->sli_flag);
10227
10228         if (mbx_cmnd != MBX_HEARTBEAT) {
10229                 if (mboxq->vport) {
10230                         lpfc_debugfs_disc_trc(mboxq->vport,
10231                                 LPFC_DISC_TRC_MBOX_VPORT,
10232                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
10233                                 mbx_cmnd, mqe->un.mb_words[0],
10234                                 mqe->un.mb_words[1]);
10235                 } else {
10236                         lpfc_debugfs_disc_trc(phba->pport,
10237                                 LPFC_DISC_TRC_MBOX,
10238                                 "MBOX Send: cmd:x%x mb:x%x x%x",
10239                                 mbx_cmnd, mqe->un.mb_words[0],
10240                                 mqe->un.mb_words[1]);
10241                 }
10242         }
10243         psli->slistat.mbox_cmd++;
10244
10245         /* Post the mailbox command to the port */
10246         rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10247         if (rc != MBX_SUCCESS) {
10248                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10249                                 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10250                                 "cannot issue Data: x%x x%x\n",
10251                                 mboxq->vport ? mboxq->vport->vpi : 0,
10252                                 mboxq->u.mb.mbxCommand,
10253                                 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10254                                 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10255                                 psli->sli_flag, MBX_NOWAIT);
10256                 goto out_not_finished;
10257         }
10258
10259         return rc;
10260
10261 out_not_finished:
10262         spin_lock_irqsave(&phba->hbalock, iflags);
10263         if (phba->sli.mbox_active) {
10264                 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10265                 __lpfc_mbox_cmpl_put(phba, mboxq);
10266                 /* Release the token */
10267                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10268                 phba->sli.mbox_active = NULL;
10269         }
10270         spin_unlock_irqrestore(&phba->hbalock, iflags);
10271
10272         return MBX_NOT_FINISHED;
10273 }
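
/*
 * Serialization notes, derived from the checks above: LPFC_SLI_ASYNC_MBX_BLK
 * gates the whole async path (a synchronous MBX_POLL issuer sets it while it
 * owns the port), LPFC_SLI_MBOX_ACTIVE is the single service token, and
 * phba->sli.mbox_active points at the command currently outstanding on the
 * wire.
 */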
10274
10275 /**
10276  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10277  * @phba: Pointer to HBA context object.
10278  * @pmbox: Pointer to mailbox object.
10279  * @flag: Flag indicating how the mailbox needs to be processed.
10280  *
10281  * This routine dispatches to the SLI-3 or SLI-4 mailbox issuing routine
10282  * through the API jump table function pointer in the lpfc_hba struct.
10283  *
10284  * Return codes: the caller owns the mailbox command after the function
10285  * returns.
10286  **/
10287 int
10288 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10289 {
10290         return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10291 }
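
/*
 * Illustrative sketch, not part of the driver: a typical asynchronous caller,
 * modeled on the driver's heartbeat path, allocates a mailbox from the
 * mempool, builds the command, and hands it to the wrapper above.  The helper
 * calls used here exist elsewhere in the driver; the example function itself
 * is hypothetical and compiled out.
 */
#if 0
static int lpfc_example_issue_heartbeat(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *mboxq;
        int rc;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return -ENOMEM;

        lpfc_heart_beat(phba, mboxq);                   /* build MBX_HEARTBEAT */
        mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;      /* default completion */
        mboxq->vport = phba->pport;

        /* MBX_BUSY means queued on the internal FIFO; both codes are OK. */
        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
        if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
                mempool_free(mboxq, phba->mbox_mem_pool);
                return -EIO;
        }
        return 0;
}
#endif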
10292
10293 /**
10294  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10295  * @phba: The hba struct for which this call is being executed.
10296  * @dev_grp: The HBA PCI-Device group number.
10297  *
10298  * This routine sets up the mbox interface API function jump table in @phba
10299  * struct.
10300  * Returns: 0 - success, -ENODEV - failure.
10301  **/
10302 int
10303 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10304 {
10305
10306         switch (dev_grp) {
10307         case LPFC_PCI_DEV_LP:
10308                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10309                 phba->lpfc_sli_handle_slow_ring_event =
10310                                 lpfc_sli_handle_slow_ring_event_s3;
10311                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10312                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10313                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10314                 break;
10315         case LPFC_PCI_DEV_OC:
10316                 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10317                 phba->lpfc_sli_handle_slow_ring_event =
10318                                 lpfc_sli_handle_slow_ring_event_s4;
10319                 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10320                 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10321                 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10322                 break;
10323         default:
10324                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10325                                 "1420 Invalid HBA PCI-device group: 0x%x\n",
10326                                 dev_grp);
10327                 return -ENODEV;
10328         }
10329         return 0;
10330 }
10331
10332 /**
10333  * __lpfc_sli_ringtx_put - Add an iocb to the txq
10334  * @phba: Pointer to HBA context object.
10335  * @pring: Pointer to driver SLI ring object.
10336  * @piocb: Pointer to the command iocb being added.
10337  *
10338  * This function is called with the hbalock held for SLI3 ports, or
10339  * the ring lock held for SLI4 ports, to add a command
10340  * iocb to the txq when the SLI layer cannot submit the command iocb
10341  * to the ring.
10342  **/
10343 void
10344 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10345                     struct lpfc_iocbq *piocb)
10346 {
10347         if (phba->sli_rev == LPFC_SLI_REV4)
10348                 lockdep_assert_held(&pring->ring_lock);
10349         else
10350                 lockdep_assert_held(&phba->hbalock);
10351         /* Insert the caller's iocb in the txq tail for later processing. */
10352         list_add_tail(&piocb->list, &pring->txq);
10353 }
10354
10355 /**
10356  * lpfc_sli_next_iocb - Get the next iocb in the txq
10357  * @phba: Pointer to HBA context object.
10358  * @pring: Pointer to driver SLI ring object.
10359  * @piocb: Pointer to address of newly added command iocb.
10360  *
10361  * This function is called with hbalock held before a new
10362  * iocb is submitted to the firmware. It drains the txq so
10363  * that queued iocbs are flushed to the firmware before any
10364  * new iocb is submitted.
10365  * If there are iocbs in the txq which need to be submitted
10366  * to the firmware, lpfc_sli_next_iocb dequeues the first
10367  * element from the txq and returns it.
10368  * If the txq is empty, the function returns *piocb and sets
10369  * *piocb to NULL. The caller checks *piocb to determine
10370  * whether more commands remain in the txq.
10371  **/
10372 static struct lpfc_iocbq *
10373 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10374                    struct lpfc_iocbq **piocb)
10375 {
10376         struct lpfc_iocbq *nextiocb;
10377
10378         lockdep_assert_held(&phba->hbalock);
10379
10380         nextiocb = lpfc_sli_ringtx_get(phba, pring);
10381         if (!nextiocb) {
10382                 nextiocb = *piocb;
10383                 *piocb = NULL;
10384         }
10385
10386         return nextiocb;
10387 }
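
/*
 * Typical use, as in __lpfc_sli_issue_iocb_s3() below: the caller drains the
 * txq ahead of the new request,
 *
 *      while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *             (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *              lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *
 * so queued iocbs go out first and the new iocb goes out last.
 */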
10388
10389 /**
10390  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10391  * @phba: Pointer to HBA context object.
10392  * @ring_number: SLI ring number to issue iocb on.
10393  * @piocb: Pointer to command iocb.
10394  * @flag: Flag indicating if this command can be put into txq.
10395  *
10396  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10397  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10398  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10399  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10400  * this function allows only iocbs for posting buffers. This function finds
10401  * next available slot in the command ring and posts the command to the
10402  * available slot and writes the port attention register to request HBA start
10403  * processing new iocb. If there is no slot available in the ring and
10404  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10405  * the function returns IOCB_BUSY.
10406  *
10407  * This function is called with hbalock held. The function will return success
10408  * after it successfully submits the iocb to the firmware or after adding it
10409  * to the txq.
10410  **/
10411 static int
10412 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10413                     struct lpfc_iocbq *piocb, uint32_t flag)
10414 {
10415         struct lpfc_iocbq *nextiocb;
10416         IOCB_t *iocb;
10417         struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10418
10419         lockdep_assert_held(&phba->hbalock);
10420
10421         if (piocb->cmd_cmpl && (!piocb->vport) &&
10422            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10423            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10424                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10425                                 "1807 IOCB x%x failed. No vport\n",
10426                                 piocb->iocb.ulpCommand);
10427                 dump_stack();
10428                 return IOCB_ERROR;
10429         }
10430
10432         /* If the PCI channel is in offline state, do not post iocbs. */
10433         if (unlikely(pci_channel_offline(phba->pcidev)))
10434                 return IOCB_ERROR;
10435
10436         /* If HBA has a deferred error attention, fail the iocb. */
10437         if (unlikely(phba->hba_flag & DEFER_ERATT))
10438                 return IOCB_ERROR;
10439
10440         /*
10441          * We should never get an IOCB if we are in a < LINK_DOWN state
10442          */
10443         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10444                 return IOCB_ERROR;
10445
10446         /*
10447          * Check to see if we are blocking IOCB processing because of an
10448          * outstanding event.
10449          */
10450         if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10451                 goto iocb_busy;
10452
10453         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10454                 /*
10455                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10456                  * can be issued if the link is not up.
10457                  */
10458                 switch (piocb->iocb.ulpCommand) {
10459                 case CMD_QUE_RING_BUF_CN:
10460                 case CMD_QUE_RING_BUF64_CN:
10461                         /*
10462                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10463                          * completion, cmd_cmpl MUST be 0.
10464                          */
10465                         if (piocb->cmd_cmpl)
10466                                 piocb->cmd_cmpl = NULL;
10467                         fallthrough;
10468                 case CMD_CREATE_XRI_CR:
10469                 case CMD_CLOSE_XRI_CN:
10470                 case CMD_CLOSE_XRI_CX:
10471                         break;
10472                 default:
10473                         goto iocb_busy;
10474                 }
10475
10476         /*
10477          * For FCP commands, we must be in a state where we can process link
10478          * attention events.
10479          */
10480         } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10481                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10482                 goto iocb_busy;
10483         }
10484
10485         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10486                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10487                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10488
10489         if (iocb)
10490                 lpfc_sli_update_ring(phba, pring);
10491         else
10492                 lpfc_sli_update_full_ring(phba, pring);
10493
10494         if (!piocb)
10495                 return IOCB_SUCCESS;
10496
10497         goto out_busy;
10498
10499  iocb_busy:
10500         pring->stats.iocb_cmd_delay++;
10501
10502  out_busy:
10503
10504         if (!(flag & SLI_IOCB_RET_IOCB)) {
10505                 __lpfc_sli_ringtx_put(phba, pring, piocb);
10506                 return IOCB_SUCCESS;
10507         }
10508
10509         return IOCB_BUSY;
10510 }
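
/*
 * Note on the return semantics above: IOCB_SUCCESS is returned both when the
 * iocb reaches the ring and when it is parked on the txq; only callers that
 * pass SLI_IOCB_RET_IOCB see IOCB_BUSY instead of the txq fallback.
 */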
10511
10512 /**
10513  * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10514  * @phba: Pointer to HBA context object.
10515  * @ring_number: SLI ring number to issue wqe on.
10516  * @piocb: Pointer to command iocb.
10517  * @flag: Flag indicating if this command can be put into txq.
10518  *
10519  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10520  * function to send an iocb command to an HBA with SLI-3 interface spec.
10521  *
10522  * This function takes the hbalock before invoking the lockless version.
10523  * The function will return success after it successfully submits the wqe to
10524  * the firmware or after adding it to the txq.
10525  **/
10526 static int
10527 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10528                            struct lpfc_iocbq *piocb, uint32_t flag)
10529 {
10530         unsigned long iflags;
10531         int rc;
10532
10533         spin_lock_irqsave(&phba->hbalock, iflags);
10534         rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10535         spin_unlock_irqrestore(&phba->hbalock, iflags);
10536
10537         return rc;
10538 }
10539
10540 /**
10541  * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10542  * @phba: Pointer to HBA context object.
10543  * @ring_number: SLI ring number to issue wqe on.
10544  * @piocb: Pointer to command iocb.
10545  * @flag: Flag indicating if this command can be put into txq.
10546  *
10547  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10548  * a wqe command to an HBA with SLI-4 interface spec.
10549  *
10550  * This function is a lockless version. The function will return success
10551  * after it successfully submits the wqe to the firmware or after adding it
10552  * to the txq.
10553  **/
10554 static int
10555 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10556                            struct lpfc_iocbq *piocb, uint32_t flag)
10557 {
10558         struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10559
10560         lpfc_prep_embed_io(phba, lpfc_cmd);
10561         return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10562 }
10563
10564 void
10565 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10566 {
10567         struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10568         union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10569         struct sli4_sge *sgl;
10570
10571         /* 128 byte wqe support here */
10572         sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10573
10574         if (phba->fcp_embed_io) {
10575                 struct fcp_cmnd *fcp_cmnd;
10576                 u32 *ptr;
10577
10578                 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10579
10580                 /* Word 0-2 - FCP_CMND */
10581                 wqe->generic.bde.tus.f.bdeFlags =
10582                         BUFF_TYPE_BDE_IMMED;
10583                 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10584                 wqe->generic.bde.addrHigh = 0;
10585                 wqe->generic.bde.addrLow =  88;  /* Word 22 */
10586
10587                 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10588                 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10589
10590                 /* Word 22-29  FCP CMND Payload */
10591                 ptr = &wqe->words[22];
10592                 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10593         } else {
10594                 /* Word 0-2 - Inline BDE */
10595                 wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10596                 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10597                 wqe->generic.bde.addrHigh = sgl->addr_hi;
10598                 wqe->generic.bde.addrLow =  sgl->addr_lo;
10599
10600                 /* Word 10 */
10601                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10602                 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10603         }
10604
10605         /* add the VMID tags as per switch response */
10606         if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10607                 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10608                         bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10609                         bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10610                                         (piocb->vmid_tag.cs_ctl_vmid));
10611                 } else if (phba->cfg_vmid_app_header) {
10612                         bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10613                         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10614                         wqe->words[31] = piocb->vmid_tag.app_id;
10615                 }
10616         }
10617 }
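
/*
 * Resulting WQE layout, restated from the two branches above:
 *
 *   fcp_embed_io set:   words 0-2 hold an immediate BDE whose addrLow (88)
 *                       is the byte offset of word 22; words 22-29 carry the
 *                       FCP_CMND payload inline (wqe_wqes=1, wqe_dbde=0).
 *   fcp_embed_io clear: words 0-2 hold a 64-bit BDE pointing at the external
 *                       FCP_CMND buffer (wqe_dbde=1, wqe_wqes=0).
 */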
10618
10619 /**
10620  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10621  * @phba: Pointer to HBA context object.
10622  * @ring_number: SLI ring number to issue iocb on.
10623  * @piocb: Pointer to command iocb.
10624  * @flag: Flag indicating if this command can be put into txq.
10625  *
10626  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10627  * an iocb command to an HBA with SLI-4 interface spec.
10628  *
10629  * This function is called with ringlock held. The function will return success
10630  * after it successfully submits the iocb to the firmware or after adding it
10631  * to the txq.
10632  **/
10633 static int
10634 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10635                          struct lpfc_iocbq *piocb, uint32_t flag)
10636 {
10637         struct lpfc_sglq *sglq;
10638         union lpfc_wqe128 *wqe;
10639         struct lpfc_queue *wq;
10640         struct lpfc_sli_ring *pring;
10641         u32 ulp_command = get_job_cmnd(phba, piocb);
10642
10643         /* Get the WQ */
10644         if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10645             (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10646                 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10647         } else {
10648                 wq = phba->sli4_hba.els_wq;
10649         }
10650
10651         /* Get corresponding ring */
10652         pring = wq->pring;
10653
10654         /*
10655          * The WQE can be either 64 or 128 bytes.
10656          */
10657
10658         lockdep_assert_held(&pring->ring_lock);
10659         wqe = &piocb->wqe;
10660         if (piocb->sli4_xritag == NO_XRI) {
10661                 if (ulp_command == CMD_ABORT_XRI_CX)
10662                         sglq = NULL;
10663                 else {
10664                         sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10665                         if (!sglq) {
10666                                 if (!(flag & SLI_IOCB_RET_IOCB)) {
10667                                         __lpfc_sli_ringtx_put(phba,
10668                                                         pring,
10669                                                         piocb);
10670                                         return IOCB_SUCCESS;
10671                                 } else {
10672                                         return IOCB_BUSY;
10673                                 }
10674                         }
10675                 }
10676         } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10677                 /* These IO's already have an XRI and a mapped sgl. */
10678                 sglq = NULL;
10679         } else {
10681                 /*
10682                  * This is a continuation of a command (CX), so this
10683                  * sglq is on the active list.
10684                  */
10685                 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10686                 if (!sglq)
10687                         return IOCB_ERROR;
10688         }
10689
10690         if (sglq) {
10691                 piocb->sli4_lxritag = sglq->sli4_lxritag;
10692                 piocb->sli4_xritag = sglq->sli4_xritag;
10693
10694                 /* For an ABTS sent by the initiator to a CT exchange,
10695                  * the RX_ID field will be filled with the newly
10696                  * allocated responder XRI.
10697                  */
10698                 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10699                     piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10700                         bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10701                                piocb->sli4_xritag);
10702
10703                 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10704                        piocb->sli4_xritag);
10705
10706                 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10707                         return IOCB_ERROR;
10708         }
10709
10710         if (lpfc_sli4_wq_put(wq, wqe))
10711                 return IOCB_ERROR;
10712
10713         lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10714
10715         return 0;
10716 }
10717
10718 /*
10719  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10720  *
10721  * This routine wraps the actual FCP I/O issuing function: a WQE for SLI-4
10722  * or an IOCB for SLI-3, dispatched through the function pointer in the
10723  * lpfc_hba struct.
10724  *
10725  * Return codes:
10726  * IOCB_ERROR - Error
10727  * IOCB_SUCCESS - Success
10728  * IOCB_BUSY - Busy
10729  **/
10730 int
10731 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10732                       struct lpfc_iocbq *piocb, uint32_t flag)
10733 {
10734         return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10735 }
10736
10737 /*
10738  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10739  *
10740  * This routine wraps the actual lockless IOCB issuing function through the
10741  * function pointer in the lpfc_hba struct.
10742  *
10743  * Return codes:
10744  * IOCB_ERROR - Error
10745  * IOCB_SUCCESS - Success
10746  * IOCB_BUSY - Busy
10747  **/
10748 int
10749 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10750                 struct lpfc_iocbq *piocb, uint32_t flag)
10751 {
10752         return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10753 }
10754
10755 static void
10756 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10757                                struct lpfc_vport *vport,
10758                                struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10759                                u32 elscmd, u8 tmo, u8 expect_rsp)
10760 {
10761         struct lpfc_hba *phba = vport->phba;
10762         IOCB_t *cmd;
10763
10764         cmd = &cmdiocbq->iocb;
10765         memset(cmd, 0, sizeof(*cmd));
10766
10767         cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10768         cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10769         cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10770
10771         if (expect_rsp) {
10772                 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10773                 cmd->un.elsreq64.remoteID = did; /* DID */
10774                 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10775                 cmd->ulpTimeout = tmo;
10776         } else {
10777                 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10778                 cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10779                 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10780                 cmd->ulpPU = PARM_NPIV_DID;
10781         }
10782         cmd->ulpBdeCount = 1;
10783         cmd->ulpLe = 1;
10784         cmd->ulpClass = CLASS3;
10785
10786         /* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10787         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10788                 if (expect_rsp) {
10789                         cmd->un.elsreq64.myID = vport->fc_myDID;
10790
10791                         /* For ELS_REQUEST64_CR, use the VPI by default */
10792                         cmd->ulpContext = phba->vpi_ids[vport->vpi];
10793                 }
10794
10795                 cmd->ulpCt_h = 0;
10796                 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10797                 if (elscmd == ELS_CMD_ECHO)
10798                         cmd->ulpCt_l = 0; /* context = invalid RPI */
10799                 else
10800                         cmd->ulpCt_l = 1; /* context = VPI */
10801         }
10802 }
10803
10804 static void
10805 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10806                                struct lpfc_vport *vport,
10807                                struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10808                                u32 elscmd, u8 tmo, u8 expect_rsp)
10809 {
10810         struct lpfc_hba  *phba = vport->phba;
10811         union lpfc_wqe128 *wqe;
10812         struct ulp_bde64_le *bde;
10813         u8 els_id;
10814
10815         wqe = &cmdiocbq->wqe;
10816         memset(wqe, 0, sizeof(*wqe));
10817
10818         /* Word 0 - 2 BDE */
10819         bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10820         bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10821         bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10822         bde->type_size = cpu_to_le32(cmd_size);
10823         bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10824
10825         if (expect_rsp) {
10826                 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10827
10828                 /* Transfer length */
10829                 wqe->els_req.payload_len = cmd_size;
10830                 wqe->els_req.max_response_payload_len = FCELSSIZE;
10831
10832                 /* DID */
10833                 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10834
10835                 /* Word 11 - ELS_ID */
10836                 switch (elscmd) {
10837                 case ELS_CMD_PLOGI:
10838                         els_id = LPFC_ELS_ID_PLOGI;
10839                         break;
10840                 case ELS_CMD_FLOGI:
10841                         els_id = LPFC_ELS_ID_FLOGI;
10842                         break;
10843                 case ELS_CMD_LOGO:
10844                         els_id = LPFC_ELS_ID_LOGO;
10845                         break;
10846                 case ELS_CMD_FDISC:
10847                         if (!vport->fc_myDID) {
10848                                 els_id = LPFC_ELS_ID_FDISC;
10849                                 break;
10850                         }
10851                         fallthrough;
10852                 default:
10853                         els_id = LPFC_ELS_ID_DEFAULT;
10854                         break;
10855                 }
10856
10857                 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10858         } else {
10859                 /* DID */
10860                 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10861
10862                 /* Transfer length */
10863                 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10864
10865                 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10866                        CMD_XMIT_ELS_RSP64_WQE);
10867         }
10868
10869         bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10870         bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10871         bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10872
10873         /* If we have NPIV enabled, we want to send ELS traffic by VPI.
10874          * For SLI4, since the driver controls VPIs we also want to include
10875          * all ELS pt2pt protocol traffic as well.
10876          */
10877         if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10878             (vport->fc_flag & FC_PT2PT)) {
10879                 if (expect_rsp) {
10880                         bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10881
10882                         /* For ELS_REQUEST64_WQE, use the VPI by default */
10883                         bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10884                                phba->vpi_ids[vport->vpi]);
10885                 }
10886
10887                 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10888                 if (elscmd == ELS_CMD_ECHO)
10889                         bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10890                 else
10891                         bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10892         }
10893 }
10894
10895 void
10896 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10897                           struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10898                           u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10899                           u8 expect_rsp)
10900 {
10901         phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10902                                           elscmd, tmo, expect_rsp);
10903 }
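
/*
 * Condensed call sequence, illustrative only (variable names are
 * hypothetical): after allocating an iocbq and a BPL buffer describing the
 * payload, a caller prepares and issues an ELS request roughly as
 *
 *      lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size, did,
 *                                elscmd, tmo, 1);      (expect_rsp = 1)
 *      rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *
 * with the completion handler set on elsiocb beforehand.
 */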
10904
10905 static void
10906 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10907                            u16 rpi, u32 num_entry, u8 tmo)
10908 {
10909         IOCB_t *cmd;
10910
10911         cmd = &cmdiocbq->iocb;
10912         memset(cmd, 0, sizeof(*cmd));
10913
10914         cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10915         cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10916         cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10917         cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10918
10919         cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10920         cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10921         cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10922
10923         cmd->ulpContext = rpi;
10924         cmd->ulpClass = CLASS3;
10925         cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10926         cmd->ulpBdeCount = 1;
10927         cmd->ulpLe = 1;
10928         cmd->ulpOwner = OWN_CHIP;
10929         cmd->ulpTimeout = tmo;
10930 }
10931
10932 static void
10933 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10934                            u16 rpi, u32 num_entry, u8 tmo)
10935 {
10936         union lpfc_wqe128 *cmdwqe;
10937         struct ulp_bde64_le *bde, *bpl;
10938         u32 xmit_len = 0, total_len = 0, size, type, i;
10939
10940         cmdwqe = &cmdiocbq->wqe;
10941         memset(cmdwqe, 0, sizeof(*cmdwqe));
10942
10943         /* Calculate total_len and xmit_len */
10944         bpl = (struct ulp_bde64_le *)bmp->virt;
10945         for (i = 0; i < num_entry; i++) {
10946                 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10947                 total_len += size;
10948         }
10949         for (i = 0; i < num_entry; i++) {
10950                 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10951                 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10952                 if (type != ULP_BDE64_TYPE_BDE_64)
10953                         break;
10954                 xmit_len += size;
10955         }
10956
10957         /* Words 0 - 2 */
10958         bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10959         bde->addr_low = bpl->addr_low;
10960         bde->addr_high = bpl->addr_high;
10961         bde->type_size = cpu_to_le32(xmit_len);
10962         bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10963
10964         /* Word 3 */
10965         cmdwqe->gen_req.request_payload_len = xmit_len;
10966
10967         /* Word 5 */
10968         bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10969         bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10970         bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10971         bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10972
10973         /* Word 6 */
10974         bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10975
10976         /* Word 7 */
10977         bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10978         bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10979         bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10980         bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10981
10982         /* Word 12 */
10983         cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10984 }
10985
10986 void
10987 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10988                       struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
10989 {
10990         phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
10991 }
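
/*
 * Worked example for the s4 length math above: with a BPL of three BDE_64
 * request entries of 256, 256 and 512 bytes, followed by response entries of
 * another BDE type totalling 4096 bytes, xmit_len = 1024 (the leading BDE_64
 * run), total_len = 5120, and max_response_payload_len = 5120 - 1024 = 4096.
 */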
10992
10993 static void
10994 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
10995                               struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10996                               u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10997 {
10998         IOCB_t *icmd;
10999
11000         icmd = &cmdiocbq->iocb;
11001         memset(icmd, 0, sizeof(*icmd));
11002
11003         icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11004         icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11005         icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11006         icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11007         icmd->un.xseq64.w5.hcsw.Fctl = LA;
11008         if (last_seq)
11009                 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11010         icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11011         icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11012         icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11013
11014         icmd->ulpBdeCount = 1;
11015         icmd->ulpLe = 1;
11016         icmd->ulpClass = CLASS3;
11017
11018         switch (cr_cx_cmd) {
11019         case CMD_XMIT_SEQUENCE64_CR:
11020                 icmd->ulpContext = rpi;
11021                 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11022                 break;
11023         case CMD_XMIT_SEQUENCE64_CX:
11024                 icmd->ulpContext = ox_id;
11025                 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11026                 break;
11027         default:
11028                 break;
11029         }
11030 }
11031
11032 static void
11033 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11034                               struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11035                               u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11036 {
11037         union lpfc_wqe128 *wqe;
11038         struct ulp_bde64 *bpl;
11039
11040         wqe = &cmdiocbq->wqe;
11041         memset(wqe, 0, sizeof(*wqe));
11042
11043         /* Words 0 - 2 */
11044         bpl = (struct ulp_bde64 *)bmp->virt;
11045         wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11046         wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11047         wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11048
11049         /* Word 5 */
11050         bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11051         bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11052         bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11053         bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11054         bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11055
11056         /* Word 6 */
11057         bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11058
11059         bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11060                CMD_XMIT_SEQUENCE64_WQE);
11061
11062         /* Word 7 */
11063         bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11064
11065         /* Word 9 */
11066         bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11067
11068         /* Word 12 */
11069         if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
11070                 wqe->xmit_sequence.xmit_len = full_size;
11071         else
11072                 wqe->xmit_sequence.xmit_len =
11073                         wqe->xmit_sequence.bde.tus.f.bdeSize;
11074 }
11075
11076 void
11077 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11078                          struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11079                          u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11080 {
11081         phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11082                                          rctl, last_seq, cr_cx_cmd);
11083 }
11084
11085 static void
11086 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11087                              u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11088                              bool wqec)
11089 {
11090         IOCB_t *icmd = NULL;
11091
11092         icmd = &cmdiocbq->iocb;
11093         memset(icmd, 0, sizeof(*icmd));
11094
11095         /* Word 5 */
11096         icmd->un.acxri.abortContextTag = ulp_context;
11097         icmd->un.acxri.abortIoTag = iotag;
11098
11099         if (ia) {
11100                 /* Word 7 */
11101                 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11102         } else {
11103                 /* Word 3 */
11104                 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11105
11106                 /* Word 7 */
11107                 icmd->ulpClass = ulp_class;
11108                 icmd->ulpCommand = CMD_ABORT_XRI_CN;
11109         }
11110
11111         /* Word 7 */
11112         icmd->ulpLe = 1;
11113 }
11114
11115 static void
11116 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11117                              u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11118                              bool wqec)
11119 {
11120         union lpfc_wqe128 *wqe;
11121
11122         wqe = &cmdiocbq->wqe;
11123         memset(wqe, 0, sizeof(*wqe));
11124
11125         /* Word 3 */
11126         bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11127         if (ia)
11128                 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11129         else
11130                 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11131
11132         /* Word 7 */
11133         bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11134
11135         /* Word 8 */
11136         wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11137
11138         /* Word 9 */
11139         bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11140
11141         /* Word 10 */
11142         bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11143
11144         /* Word 11 */
11145         if (wqec)
11146                 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11147         bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11148         bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11149 }
11150
11151 void
11152 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11153                         u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11154                         bool ia, bool wqec)
11155 {
11156         phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11157                                         cqid, ia, wqec);
11158 }
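
/*
 * Note on @ia, from the s3/s4 bodies above: when true, the s3 path builds
 * CMD_CLOSE_XRI_CN (the exchange is closed without an on-wire ABTS) and the
 * s4 path sets abort_cmd_ia; when false, an explicit abort is built
 * (CMD_ABORT_XRI_CN with ABORT_TYPE_ABTS, or CMD_ABORT_XRI_WQE).
 */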
11159
11160 /**
11161  * lpfc_sli_api_table_setup - Set up sli api function jump table
11162  * @phba: The hba struct for which this call is being executed.
11163  * @dev_grp: The HBA PCI-Device group number.
11164  *
11165  * This routine sets up the SLI interface API function jump table in @phba
11166  * struct.
11167  * Returns: 0 - success, -ENODEV - failure.
11168  **/
11169 int
11170 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11171 {
11172
11173         switch (dev_grp) {
11174         case LPFC_PCI_DEV_LP:
11175                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11176                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11177                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11178                 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11179                 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11180                 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11181                 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11182                 break;
11183         case LPFC_PCI_DEV_OC:
11184                 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11185                 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11186                 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11187                 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11188                 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11189                 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11190                 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11191                 break;
11192         default:
11193                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11194                                 "1419 Invalid HBA PCI-device group: 0x%x\n",
11195                                 dev_grp);
11196                 return -ENODEV;
11197         }
11198         return 0;
11199 }
11200
11201 /**
11202  * lpfc_sli4_calc_ring - Calculates which ring to use
11203  * @phba: Pointer to HBA context object.
11204  * @piocb: Pointer to command iocb.
11205  *
11206  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11207  * hba_wqidx, thus we need to calculate the corresponding ring.
11208  * Since ABORTS must go on the same WQ as the command they are
11209  * aborting, we use the command's hba_wqidx.
11210  */
11211 struct lpfc_sli_ring *
11212 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11213 {
11214         struct lpfc_io_buf *lpfc_cmd;
11215
11216         if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11217                 if (unlikely(!phba->sli4_hba.hdwq))
11218                         return NULL;
11219                 /*
11220                  * For an abort iocb, hba_wqidx should already
11221                  * be set up based on which work queue we used.
11222                  */
11223                 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11224                         lpfc_cmd = piocb->io_buf;
11225                         piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11226                 }
11227                 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11228         } else {
11229                 if (unlikely(!phba->sli4_hba.els_wq))
11230                         return NULL;
11231                 piocb->hba_wqidx = 0;
11232                 return phba->sli4_hba.els_wq->pring;
11233         }
11234 }
11235
11236 inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11237 {
11238         struct lpfc_hba *phba = eq->phba;
11239
11240         /*
11241          * Unlocking an irq is one of the entry points to check
11242          * for re-scheduling, but we are fine on the io submission
11243          * path as the midlayer does a get_cpu to pin us in. Flush
11244          * out the invalidation queue so we can see the updated
11245          * value of the mode flag.
11246          */
11247         smp_rmb();
11248
11249         if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11250                 /* We will likely not get the completion for the caller
11251                  * during this iteration, but that is fine.
11252                  * Future io's coming on this eq should be able to
11253                  * pick it up.  Single io's, in turn, are handled
11254                  * through a sched from the polling timer function,
11255                  * which is currently triggered every 1 msec.
11256                  */
11257                 lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
11258 }
11259
11260 /**
11261  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11262  * @phba: Pointer to HBA context object.
11263  * @ring_number: Ring number
11264  * @piocb: Pointer to command iocb.
11265  * @flag: Flag indicating if this command can be put into txq.
11266  *
11267  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
11268  * function. It takes the appropriate lock (the ring_lock for SLI-4
11269  * ports, the hbalock for SLI-2/3 ports), calls __lpfc_sli_issue_iocb,
11270  * and returns whatever __lpfc_sli_issue_iocb returns. This wrapper
11271  * is used by functions which do not hold the lock themselves.
11272  **/
11273 int
11274 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11275                     struct lpfc_iocbq *piocb, uint32_t flag)
11276 {
11277         struct lpfc_sli_ring *pring;
11278         struct lpfc_queue *eq;
11279         unsigned long iflags;
11280         int rc;
11281
11282         /* If the PCI channel is in offline state, do not post iocbs. */
11283         if (unlikely(pci_channel_offline(phba->pcidev)))
11284                 return IOCB_ERROR;
11285
11286         if (phba->sli_rev == LPFC_SLI_REV4) {
11287                 lpfc_sli_prep_wqe(phba, piocb);
11288
11289                 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11290
11291                 pring = lpfc_sli4_calc_ring(phba, piocb);
11292                 if (unlikely(pring == NULL))
11293                         return IOCB_ERROR;
11294
11295                 spin_lock_irqsave(&pring->ring_lock, iflags);
11296                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11297                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11298
11299                 lpfc_sli4_poll_eq(eq);
11300         } else {
11301                 /* For now, SLI2/3 will still use hbalock */
11302                 spin_lock_irqsave(&phba->hbalock, iflags);
11303                 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11304                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11305         }
11306         return rc;
11307 }
11308
11309 /**
11310  * lpfc_extra_ring_setup - Extra ring setup function
11311  * @phba: Pointer to HBA context object.
11312  *
11313  * This function is called while the driver attaches to the
11314  * HBA to set up the extra ring. The extra ring is used
11315  * only when the driver needs to support target mode
11316  * or IP over FC functionality.
11317  *
11318  * This function is called with no lock held. SLI3 only.
11319  **/
11320 static int
11321 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11322 {
11323         struct lpfc_sli *psli;
11324         struct lpfc_sli_ring *pring;
11325
11326         psli = &phba->sli;
11327
11328         /* Adjust cmd/rsp ring iocb entries more evenly */
11329
11330         /* Take some away from the FCP ring */
11331         pring = &psli->sli3_ring[LPFC_FCP_RING];
11332         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11333         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11334         pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11335         pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11336
11337         /* and give them to the extra ring */
11338         pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11339
11340         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11341         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11342         pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11343         pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11344
11345         /* Setup default profile for this ring */
11346         pring->iotag_max = 4096;
11347         pring->num_mask = 1;
11348         pring->prt[0].profile = 0;      /* Mask 0 */
11349         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11350         pring->prt[0].type = phba->cfg_multi_ring_type;
11351         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11352         return 0;
11353 }
11354
11355 static void
11356 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11357                              struct lpfc_nodelist *ndlp)
11358 {
11359         unsigned long iflags;
11360         struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11361
11362         spin_lock_irqsave(&phba->hbalock, iflags);
11363         if (!list_empty(&evtp->evt_listp)) {
11364                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11365                 return;
11366         }
11367
11368         /* Take a node reference, held until the queued work is done. */
11369         evtp->evt_arg1  = lpfc_nlp_get(ndlp);
11370         if (!evtp->evt_arg1) {
11371                 spin_unlock_irqrestore(&phba->hbalock, iflags);
11372                 return;
11373         }
11374         evtp->evt = LPFC_EVT_RECOVER_PORT;
11375         list_add_tail(&evtp->evt_listp, &phba->work_list);
11376         spin_unlock_irqrestore(&phba->hbalock, iflags);
11377
11378         lpfc_worker_wake_up(phba);
11379 }
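
/*
 * Consumer-side sketch (illustrative only; the real handling lives in
 * the worker thread, lpfc_work_list_done()): the event queued above is
 * popped off phba->work_list, and the node reference taken here is
 * dropped once the recovery work completes.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_remove_head(&phba->work_list, evtp, struct lpfc_work_evt,
 *			 evt_listp);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (evtp->evt == LPFC_EVT_RECOVER_PORT) {
 *		ndlp = (struct lpfc_nodelist *)evtp->evt_arg1;
 *		... run port recovery for ndlp ...
 *		lpfc_nlp_put(ndlp);	balances lpfc_nlp_get() above
 *	}
 */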
11380
11381 /**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11382  * @phba: Pointer to HBA context object.
11383  * @iocbq: Pointer to iocb object.
11384  *
11385  * The async_event handler calls this routine when it receives
11386  * an ASYNC_STATUS_CN event from the port.  The port generates
11387  * this event when an Abort Sequence request to an rport fails
11388  * twice in succession.  The abort may have originated from the
11389  * driver or from the port.  The ABTS could have been for an ELS
11390  * or FCP IO.  The port only generates this event when an ABTS
11391  * fails to complete after one retry.
11392  */
11393 static void
11394 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11395                           struct lpfc_iocbq *iocbq)
11396 {
11397         struct lpfc_nodelist *ndlp = NULL;
11398         uint16_t rpi = 0, vpi = 0;
11399         struct lpfc_vport *vport = NULL;
11400
11401         /* The rpi in the ulpContext is vport-sensitive. */
11402         vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11403         rpi = iocbq->iocb.ulpContext;
11404
11405         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11406                         "3092 Port generated ABTS async event "
11407                         "on vpi %d rpi %d status 0x%x\n",
11408                         vpi, rpi, iocbq->iocb.ulpStatus);
11409
11410         vport = lpfc_find_vport_by_vpid(phba, vpi);
11411         if (!vport)
11412                 goto err_exit;
11413         ndlp = lpfc_findnode_rpi(vport, rpi);
11414         if (!ndlp)
11415                 goto err_exit;
11416
11417         if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11418                 lpfc_sli_abts_recover_port(vport, ndlp);
11419         return;
11420
11421  err_exit:
11422         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11423                         "3095 Event Context not found, no "
11424                         "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11425                         vpi, rpi, iocbq->iocb.ulpStatus,
11426                         iocbq->iocb.ulpContext);
11427 }
11428
11429 /**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11430  * @phba: pointer to HBA context object.
11431  * @ndlp: nodelist pointer for the impacted rport.
11432  * @axri: pointer to the wcqe containing the failed exchange.
11433  *
11434  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11435  * port.  The port generates this event when an abort exchange request to an
11436  * rport fails twice in succession with no reply.  The abort could be originated
11437  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11438  */
11439 void
11440 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11441                            struct lpfc_nodelist *ndlp,
11442                            struct sli4_wcqe_xri_aborted *axri)
11443 {
11444         uint32_t ext_status = 0;
11445
11446         if (!ndlp) {
11447                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11448                                 "3115 Node Context not found, driver "
11449                                 "ignoring abts err event\n");
11450                 return;
11451         }
11452
11453         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11454                         "3116 Port generated FCP XRI ABORT event on "
11455                         "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11456                         ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11457                         bf_get(lpfc_wcqe_xa_xri, axri),
11458                         bf_get(lpfc_wcqe_xa_status, axri),
11459                         axri->parameter);
11460
11461         /*
11462          * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11463          * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11464          * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11465          */
11466         ext_status = axri->parameter & IOERR_PARAM_MASK;
11467         if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11468             ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11469                 lpfc_sli_post_recovery_event(phba, ndlp);
11470 }
11471
11472 /**
11473  * lpfc_sli_async_event_handler - ASYNC iocb handler function
11474  * @phba: Pointer to HBA context object.
11475  * @pring: Pointer to driver SLI ring object.
11476  * @iocbq: Pointer to iocb object.
11477  *
11478  * This function is called by the slow ring event handler
11479  * function when there is an ASYNC event iocb in the ring.
11480  * This function is called with no lock held.
11481  * Currently this function handles only temperature related
11482  * ASYNC events. The function decodes the temperature sensor
11483  * event message and posts events for the management applications.
11484  **/
11485 static void
11486 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11487         struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11488 {
11489         IOCB_t *icmd;
11490         uint16_t evt_code;
11491         struct temp_event temp_event_data;
11492         struct Scsi_Host *shost;
11493         uint32_t *iocb_w;
11494
11495         icmd = &iocbq->iocb;
11496         evt_code = icmd->un.asyncstat.evt_code;
11497
11498         switch (evt_code) {
11499         case ASYNC_TEMP_WARN:
11500         case ASYNC_TEMP_SAFE:
11501                 temp_event_data.data = (uint32_t) icmd->ulpContext;
11502                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11503                 if (evt_code == ASYNC_TEMP_WARN) {
11504                         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11505                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11506                                 "0347 Adapter is very hot, please take "
11507                                 "corrective action. temperature : %d Celsius\n",
11508                                 (uint32_t) icmd->ulpContext);
11509                 } else {
11510                         temp_event_data.event_code = LPFC_NORMAL_TEMP;
11511                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11512                                 "0340 Adapter temperature is OK now. "
11513                                 "temperature : %d Celsius\n",
11514                                 (uint32_t) icmd->ulpContext);
11515                 }
11516
11517                 /* Send temperature change event to applications */
11518                 shost = lpfc_shost_from_vport(phba->pport);
11519                 fc_host_post_vendor_event(shost, fc_get_event_number(),
11520                         sizeof(temp_event_data), (char *) &temp_event_data,
11521                         LPFC_NL_VENDOR_ID);
11522                 break;
11523         case ASYNC_STATUS_CN:
11524                 lpfc_sli_abts_err_handler(phba, iocbq);
11525                 break;
11526         default:
11527                 iocb_w = (uint32_t *) icmd;
11528                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11529                         "0346 Ring %d handler: unexpected ASYNC_STATUS"
11530                         " evt_code 0x%x\n"
11531                         "W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11532                         "W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11533                         "W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11534                         "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11535                         pring->ringno, icmd->un.asyncstat.evt_code,
11536                         iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11537                         iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11538                         iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11539                         iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11540
11541                 break;
11542         }
11543 }
11544
11545
11546 /**
11547  * lpfc_sli4_setup - SLI ring setup function
11548  * @phba: Pointer to HBA context object.
11549  *
11550  * lpfc_sli4_setup configures the unsolicited receive masks for
11551  * the SLI4 ELS ring. This function is called while the driver
11552  * attaches to the HBA, before interrupts are enabled, so no
11553  * locking is needed.
11554  *
11555  * This function always returns 0.
11556  **/
11557 int
11558 lpfc_sli4_setup(struct lpfc_hba *phba)
11559 {
11560         struct lpfc_sli_ring *pring;
11561
11562         pring = phba->sli4_hba.els_wq->pring;
11563         pring->num_mask = LPFC_MAX_RING_MASK;
11564         pring->prt[0].profile = 0;      /* Mask 0 */
11565         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11566         pring->prt[0].type = FC_TYPE_ELS;
11567         pring->prt[0].lpfc_sli_rcv_unsol_event =
11568             lpfc_els_unsol_event;
11569         pring->prt[1].profile = 0;      /* Mask 1 */
11570         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11571         pring->prt[1].type = FC_TYPE_ELS;
11572         pring->prt[1].lpfc_sli_rcv_unsol_event =
11573             lpfc_els_unsol_event;
11574         pring->prt[2].profile = 0;      /* Mask 2 */
11575         /* NameServer Inquiry */
11576         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11577         /* NameServer */
11578         pring->prt[2].type = FC_TYPE_CT;
11579         pring->prt[2].lpfc_sli_rcv_unsol_event =
11580             lpfc_ct_unsol_event;
11581         pring->prt[3].profile = 0;      /* Mask 3 */
11582         /* NameServer response */
11583         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11584         /* NameServer */
11585         pring->prt[3].type = FC_TYPE_CT;
11586         pring->prt[3].lpfc_sli_rcv_unsol_event =
11587             lpfc_ct_unsol_event;
11588         return 0;
11589 }
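
/*
 * How the prt[] masks above are consumed (sketch; the actual matching is
 * done in lpfc_sli_process_unsol_iocb()): an inbound unsolicited frame's
 * R_CTL/TYPE pair is compared against each registered mask and, on a
 * hit, the corresponding handler is invoked.
 *
 *	for (i = 0; i < pring->num_mask; i++) {
 *		if (pring->prt[i].rctl == fch_r_ctl &&
 *		    pring->prt[i].type == fch_type) {
 *			(pring->prt[i].lpfc_sli_rcv_unsol_event)
 *					(phba, pring, saveq);
 *			break;
 *		}
 *	}
 */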
11590
11591 /**
11592  * lpfc_sli_setup - SLI ring setup function
11593  * @phba: Pointer to HBA context object.
11594  *
11595  * lpfc_sli_setup sets up each ring of the SLI interface with
11596  * its number of iocbs and iotags. This function is called while
11597  * the driver attaches to the HBA, before interrupts are enabled,
11598  * so no locking is needed.
11599  *
11600  * This function always returns 0. SLI3 only.
11601  **/
11602 int
11603 lpfc_sli_setup(struct lpfc_hba *phba)
11604 {
11605         int i, totiocbsize = 0;
11606         struct lpfc_sli *psli = &phba->sli;
11607         struct lpfc_sli_ring *pring;
11608
11609         psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11610         psli->sli_flag = 0;
11611
11612         psli->iocbq_lookup = NULL;
11613         psli->iocbq_lookup_len = 0;
11614         psli->last_iotag = 0;
11615
11616         for (i = 0; i < psli->num_rings; i++) {
11617                 pring = &psli->sli3_ring[i];
11618                 switch (i) {
11619                 case LPFC_FCP_RING:     /* ring 0 - FCP */
11620                         /* numCiocb and numRiocb are used in config_port */
11621                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11622                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11623                         pring->sli.sli3.numCiocb +=
11624                                 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11625                         pring->sli.sli3.numRiocb +=
11626                                 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11627                         pring->sli.sli3.numCiocb +=
11628                                 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11629                         pring->sli.sli3.numRiocb +=
11630                                 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11631                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11632                                                         SLI3_IOCB_CMD_SIZE :
11633                                                         SLI2_IOCB_CMD_SIZE;
11634                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11635                                                         SLI3_IOCB_RSP_SIZE :
11636                                                         SLI2_IOCB_RSP_SIZE;
11637                         pring->iotag_ctr = 0;
11638                         pring->iotag_max =
11639                             (phba->cfg_hba_queue_depth * 2);
11640                         pring->fast_iotag = pring->iotag_max;
11641                         pring->num_mask = 0;
11642                         break;
11643                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
11644                         /* numCiocb and numRiocb are used in config_port */
11645                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11646                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11647                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11648                                                         SLI3_IOCB_CMD_SIZE :
11649                                                         SLI2_IOCB_CMD_SIZE;
11650                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11651                                                         SLI3_IOCB_RSP_SIZE :
11652                                                         SLI2_IOCB_RSP_SIZE;
11653                         pring->iotag_max = phba->cfg_hba_queue_depth;
11654                         pring->num_mask = 0;
11655                         break;
11656                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
11657                         /* numCiocb and numRiocb are used in config_port */
11658                         pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11659                         pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11660                         pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11661                                                         SLI3_IOCB_CMD_SIZE :
11662                                                         SLI2_IOCB_CMD_SIZE;
11663                         pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11664                                                         SLI3_IOCB_RSP_SIZE :
11665                                                         SLI2_IOCB_RSP_SIZE;
11666                         pring->fast_iotag = 0;
11667                         pring->iotag_ctr = 0;
11668                         pring->iotag_max = 4096;
11669                         pring->lpfc_sli_rcv_async_status =
11670                                 lpfc_sli_async_event_handler;
11671                         pring->num_mask = LPFC_MAX_RING_MASK;
11672                         pring->prt[0].profile = 0;      /* Mask 0 */
11673                         pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11674                         pring->prt[0].type = FC_TYPE_ELS;
11675                         pring->prt[0].lpfc_sli_rcv_unsol_event =
11676                             lpfc_els_unsol_event;
11677                         pring->prt[1].profile = 0;      /* Mask 1 */
11678                         pring->prt[1].rctl = FC_RCTL_ELS_REP;
11679                         pring->prt[1].type = FC_TYPE_ELS;
11680                         pring->prt[1].lpfc_sli_rcv_unsol_event =
11681                             lpfc_els_unsol_event;
11682                         pring->prt[2].profile = 0;      /* Mask 2 */
11683                         /* NameServer Inquiry */
11684                         pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11685                         /* NameServer */
11686                         pring->prt[2].type = FC_TYPE_CT;
11687                         pring->prt[2].lpfc_sli_rcv_unsol_event =
11688                             lpfc_ct_unsol_event;
11689                         pring->prt[3].profile = 0;      /* Mask 3 */
11690                         /* NameServer response */
11691                         pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11692                         /* NameServer */
11693                         pring->prt[3].type = FC_TYPE_CT;
11694                         pring->prt[3].lpfc_sli_rcv_unsol_event =
11695                             lpfc_ct_unsol_event;
11696                         break;
11697                 }
11698                 totiocbsize += (pring->sli.sli3.numCiocb *
11699                         pring->sli.sli3.sizeCiocb) +
11700                         (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11701         }
11702         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11703                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
11704                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11705                        "SLI2 SLIM Data: x%x x%lx\n",
11706                        phba->brd_no, totiocbsize,
11707                        (unsigned long) MAX_SLIM_IOCB_SIZE);
11708         }
11709         if (phba->cfg_multi_ring_support == 2)
11710                 lpfc_extra_ring_setup(phba);
11711
11712         return 0;
11713 }
11714
11715 /**
11716  * lpfc_sli4_queue_init - Queue initialization function
11717  * @phba: Pointer to HBA context object.
11718  *
11719  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11720  * ring. This function also initializes ring indices of each ring.
11721  * This function is called during the initialization of the SLI
11722  * interface of an HBA.
11723  * This function is called with no lock held and does not
11724  * return a value.
11725  **/
11726 void
11727 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11728 {
11729         struct lpfc_sli *psli;
11730         struct lpfc_sli_ring *pring;
11731         int i;
11732
11733         psli = &phba->sli;
11734         spin_lock_irq(&phba->hbalock);
11735         INIT_LIST_HEAD(&psli->mboxq);
11736         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11737         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11738         for (i = 0; i < phba->cfg_hdw_queue; i++) {
11739                 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11740                 pring->flag = 0;
11741                 pring->ringno = LPFC_FCP_RING;
11742                 pring->txcmplq_cnt = 0;
11743                 INIT_LIST_HEAD(&pring->txq);
11744                 INIT_LIST_HEAD(&pring->txcmplq);
11745                 INIT_LIST_HEAD(&pring->iocb_continueq);
11746                 spin_lock_init(&pring->ring_lock);
11747         }
11748         pring = phba->sli4_hba.els_wq->pring;
11749         pring->flag = 0;
11750         pring->ringno = LPFC_ELS_RING;
11751         pring->txcmplq_cnt = 0;
11752         INIT_LIST_HEAD(&pring->txq);
11753         INIT_LIST_HEAD(&pring->txcmplq);
11754         INIT_LIST_HEAD(&pring->iocb_continueq);
11755         spin_lock_init(&pring->ring_lock);
11756
11757         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11758                 pring = phba->sli4_hba.nvmels_wq->pring;
11759                 pring->flag = 0;
11760                 pring->ringno = LPFC_ELS_RING;
11761                 pring->txcmplq_cnt = 0;
11762                 INIT_LIST_HEAD(&pring->txq);
11763                 INIT_LIST_HEAD(&pring->txcmplq);
11764                 INIT_LIST_HEAD(&pring->iocb_continueq);
11765                 spin_lock_init(&pring->ring_lock);
11766         }
11767
11768         spin_unlock_irq(&phba->hbalock);
11769 }
11770
11771 /**
11772  * lpfc_sli_queue_init - Queue initialization function
11773  * @phba: Pointer to HBA context object.
11774  *
11775  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11776  * ring. This function also initializes ring indices of each ring.
11777  * This function is called during the initialization of the SLI
11778  * interface of an HBA.
11779  * This function is called with no lock held and does not
11780  * return a value.
11781  **/
11782 void
11783 lpfc_sli_queue_init(struct lpfc_hba *phba)
11784 {
11785         struct lpfc_sli *psli;
11786         struct lpfc_sli_ring *pring;
11787         int i;
11788
11789         psli = &phba->sli;
11790         spin_lock_irq(&phba->hbalock);
11791         INIT_LIST_HEAD(&psli->mboxq);
11792         INIT_LIST_HEAD(&psli->mboxq_cmpl);
11793         /* Initialize list headers for txq and txcmplq as doubly linked lists */
11794         for (i = 0; i < psli->num_rings; i++) {
11795                 pring = &psli->sli3_ring[i];
11796                 pring->ringno = i;
11797                 pring->sli.sli3.next_cmdidx  = 0;
11798                 pring->sli.sli3.local_getidx = 0;
11799                 pring->sli.sli3.cmdidx = 0;
11800                 INIT_LIST_HEAD(&pring->iocb_continueq);
11801                 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11802                 INIT_LIST_HEAD(&pring->postbufq);
11803                 pring->flag = 0;
11804                 INIT_LIST_HEAD(&pring->txq);
11805                 INIT_LIST_HEAD(&pring->txcmplq);
11806                 spin_lock_init(&pring->ring_lock);
11807         }
11808         spin_unlock_irq(&phba->hbalock);
11809 }
11810
11811 /**
11812  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11813  * @phba: Pointer to HBA context object.
11814  *
11815  * This routine flushes the mailbox command subsystem. It will unconditionally
11816  * flush all the mailbox commands in the three possible stages in the mailbox
11817  * command sub-system: pending mailbox command queue; the outstanding mailbox
11818  * command; and the completed mailbox command queue. It is the caller's
11819  * responsibility to make sure that the driver is in the proper state to
11820  * flush the mailbox command sub-system. Namely, the posting of mailbox
11821  * commands into the pending mailbox command queue from the various
11822  * clients must be stopped; either the HBA is in a state in which it will
11823  * never work on the outstanding mailbox command (such as in EEH or ERATT
11824  * conditions) or the outstanding mailbox command has been completed.
11825  **/
11826 static void
11827 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11828 {
11829         LIST_HEAD(completions);
11830         struct lpfc_sli *psli = &phba->sli;
11831         LPFC_MBOXQ_t *pmb;
11832         unsigned long iflag;
11833
11834         /* Disable softirqs, including timers from obtaining phba->hbalock */
11835         local_bh_disable();
11836
11837         /* Flush all the mailbox commands in the mbox system */
11838         spin_lock_irqsave(&phba->hbalock, iflag);
11839
11840         /* The pending mailbox command queue */
11841         list_splice_init(&phba->sli.mboxq, &completions);
11842         /* The outstanding active mailbox command */
11843         if (psli->mbox_active) {
11844                 list_add_tail(&psli->mbox_active->list, &completions);
11845                 psli->mbox_active = NULL;
11846                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11847         }
11848         /* The completed mailbox command queue */
11849         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11850         spin_unlock_irqrestore(&phba->hbalock, iflag);
11851
11852         /* Enable softirqs again, done with phba->hbalock */
11853         local_bh_enable();
11854
11855         /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11856         while (!list_empty(&completions)) {
11857                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11858                 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11859                 if (pmb->mbox_cmpl)
11860                         pmb->mbox_cmpl(phba, pmb);
11861         }
11862 }
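
/*
 * Completion handlers reached by the flush above must tolerate
 * MBX_NOT_FINISHED, in which case the command was flushed rather than
 * executed and should only be released. A defensive handler sketch
 * (illustrative only; "example_mbox_cmpl" is a hypothetical name):
 *
 *	static void example_mbox_cmpl(struct lpfc_hba *phba,
 *				      LPFC_MBOXQ_t *pmb)
 *	{
 *		if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED) {
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *			return;
 *		}
 *		... normal completion processing ...
 *	}
 */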
11863
11864 /**
11865  * lpfc_sli_host_down - Vport cleanup function
11866  * @vport: Pointer to virtual port object.
11867  *
11868  * lpfc_sli_host_down is called to clean up the resources
11869  * associated with a vport before destroying virtual
11870  * port data structures.
11871  * This function does the following operations:
11872  * - Free discovery resources associated with this virtual
11873  *   port.
11874  * - Free iocbs associated with this virtual port in
11875  *   the txq.
11876  * - Send abort for all iocb commands associated with this
11877  *   vport in txcmplq.
11878  *
11879  * This function is called with no lock held and always returns 1.
11880  **/
11881 int
11882 lpfc_sli_host_down(struct lpfc_vport *vport)
11883 {
11884         LIST_HEAD(completions);
11885         struct lpfc_hba *phba = vport->phba;
11886         struct lpfc_sli *psli = &phba->sli;
11887         struct lpfc_queue *qp = NULL;
11888         struct lpfc_sli_ring *pring;
11889         struct lpfc_iocbq *iocb, *next_iocb;
11890         int i;
11891         unsigned long flags = 0;
11892         uint16_t prev_pring_flag;
11893
11894         lpfc_cleanup_discovery_resources(vport);
11895
11896         spin_lock_irqsave(&phba->hbalock, flags);
11897
11898         /*
11899          * Error everything on the txq since these iocbs
11900          * have not been given to the FW yet.
11901          * Also issue ABTS for everything on the txcmplq
11902          */
11903         if (phba->sli_rev != LPFC_SLI_REV4) {
11904                 for (i = 0; i < psli->num_rings; i++) {
11905                         pring = &psli->sli3_ring[i];
11906                         prev_pring_flag = pring->flag;
11907                         /* Only slow rings */
11908                         if (pring->ringno == LPFC_ELS_RING) {
11909                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11910                                 /* Set the lpfc data pending flag */
11911                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11912                         }
11913                         list_for_each_entry_safe(iocb, next_iocb,
11914                                                  &pring->txq, list) {
11915                                 if (iocb->vport != vport)
11916                                         continue;
11917                                 list_move_tail(&iocb->list, &completions);
11918                         }
11919                         list_for_each_entry_safe(iocb, next_iocb,
11920                                                  &pring->txcmplq, list) {
11921                                 if (iocb->vport != vport)
11922                                         continue;
11923                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11924                                                            NULL);
11925                         }
11926                         pring->flag = prev_pring_flag;
11927                 }
11928         } else {
11929                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11930                         pring = qp->pring;
11931                         if (!pring)
11932                                 continue;
11933                         if (pring == phba->sli4_hba.els_wq->pring) {
11934                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11935                                 /* Set the lpfc data pending flag */
11936                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
11937                         }
11938                         prev_pring_flag = pring->flag;
11939                         spin_lock(&pring->ring_lock);
11940                         list_for_each_entry_safe(iocb, next_iocb,
11941                                                  &pring->txq, list) {
11942                                 if (iocb->vport != vport)
11943                                         continue;
11944                                 list_move_tail(&iocb->list, &completions);
11945                         }
11946                         spin_unlock(&pring->ring_lock);
11947                         list_for_each_entry_safe(iocb, next_iocb,
11948                                                  &pring->txcmplq, list) {
11949                                 if (iocb->vport != vport)
11950                                         continue;
11951                                 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11952                                                            NULL);
11953                         }
11954                         pring->flag = prev_pring_flag;
11955                 }
11956         }
11957         spin_unlock_irqrestore(&phba->hbalock, flags);
11958
11959         /* Make sure HBA is alive */
11960         lpfc_issue_hb_tmo(phba);
11961
11962         /* Cancel all the IOCBs from the completions list */
11963         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11964                               IOERR_SLI_DOWN);
11965         return 1;
11966 }
11967
11968 /**
11969  * lpfc_sli_hba_down - Resource cleanup function for the HBA
11970  * @phba: Pointer to HBA context object.
11971  *
11972  * This function cleans up all iocb, buffers, mailbox commands
11973  * while shutting down the HBA. This function is called with no
11974  * lock held and always returns 1.
11975  * This function does the following to cleanup driver resources:
11976  * - Free discovery resources for each virtual port
11977  * - Cleanup any pending fabric iocbs
11978  * - Iterate through the iocb txq and free each entry
11979  *   in the list.
11980  * - Free up any buffer posted to the HBA
11981  * - Free mailbox commands in the mailbox queue.
11982  **/
11983 int
11984 lpfc_sli_hba_down(struct lpfc_hba *phba)
11985 {
11986         LIST_HEAD(completions);
11987         struct lpfc_sli *psli = &phba->sli;
11988         struct lpfc_queue *qp = NULL;
11989         struct lpfc_sli_ring *pring;
11990         struct lpfc_dmabuf *buf_ptr;
11991         unsigned long flags = 0;
11992         int i;
11993
11994         /* Shutdown the mailbox command sub-system */
11995         lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11996
11997         lpfc_hba_down_prep(phba);
11998
11999         /* Disable softirqs, including timers from obtaining phba->hbalock */
12000         local_bh_disable();
12001
12002         lpfc_fabric_abort_hba(phba);
12003
12004         spin_lock_irqsave(&phba->hbalock, flags);
12005
12006         /*
12007          * Error everything on the txq since these iocbs
12008          * have not been given to the FW yet.
12009          */
12010         if (phba->sli_rev != LPFC_SLI_REV4) {
12011                 for (i = 0; i < psli->num_rings; i++) {
12012                         pring = &psli->sli3_ring[i];
12013                         /* Only slow rings */
12014                         if (pring->ringno == LPFC_ELS_RING) {
12015                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12016                                 /* Set the lpfc data pending flag */
12017                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
12018                         }
12019                         list_splice_init(&pring->txq, &completions);
12020                 }
12021         } else {
12022                 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12023                         pring = qp->pring;
12024                         if (!pring)
12025                                 continue;
12026                         spin_lock(&pring->ring_lock);
12027                         list_splice_init(&pring->txq, &completions);
12028                         spin_unlock(&pring->ring_lock);
12029                         if (pring == phba->sli4_hba.els_wq->pring) {
12030                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
12031                                 /* Set the lpfc data pending flag */
12032                                 set_bit(LPFC_DATA_READY, &phba->data_flags);
12033                         }
12034                 }
12035         }
12036         spin_unlock_irqrestore(&phba->hbalock, flags);
12037
12038         /* Cancel all the IOCBs from the completions list */
12039         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12040                               IOERR_SLI_DOWN);
12041
12042         spin_lock_irqsave(&phba->hbalock, flags);
12043         list_splice_init(&phba->elsbuf, &completions);
12044         phba->elsbuf_cnt = 0;
12045         phba->elsbuf_prev_cnt = 0;
12046         spin_unlock_irqrestore(&phba->hbalock, flags);
12047
12048         while (!list_empty(&completions)) {
12049                 list_remove_head(&completions, buf_ptr,
12050                         struct lpfc_dmabuf, list);
12051                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12052                 kfree(buf_ptr);
12053         }
12054
12055         /* Enable softirqs again, done with phba->hbalock */
12056         local_bh_enable();
12057
12058         /* Return any active mbox cmds */
12059         del_timer_sync(&psli->mbox_tmo);
12060
12061         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12062         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12063         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12064
12065         return 1;
12066 }
12067
12068 /**
12069  * lpfc_sli_pcimem_bcopy - SLI memory copy function
12070  * @srcp: Source memory pointer.
12071  * @destp: Destination memory pointer.
12072  * @cnt: Number of bytes to copy, walked one 32-bit word at a time.
12073  *
12074  * This function is used for copying data between driver memory
12075  * and the SLI memory. This function also changes the endianness
12076  * of each word if the native endianness is different from the SLI
12077  * endianness. This function can be called with or without a
12078  * lock held.
12079  **/
12080 void
12081 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12082 {
12083         uint32_t *src = srcp;
12084         uint32_t *dest = destp;
12085         uint32_t ldata;
12086         int i;
12087
12088         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12089                 ldata = *src;
12090                 ldata = le32_to_cpu(ldata);
12091                 *dest = ldata;
12092                 src++;
12093                 dest++;
12094         }
12095 }
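
/*
 * Usage sketch (illustrative): since @cnt is a byte count walked one
 * 32-bit word at a time, callers pass sizeof()-style sizes, e.g. to copy
 * a mailbox image while swapping from the SLI little-endian layout. The
 * MAILBOX_t type and the phba->mbox pointer are assumed here from
 * lpfc_hw.h and lpfc.h respectively.
 *
 *	lpfc_sli_pcimem_bcopy(phba->mbox, &pmb->u.mb, sizeof(MAILBOX_t));
 */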
12096
12097
12098 /**
12099  * lpfc_sli_bemem_bcopy - SLI memory copy function
12100  * @srcp: Source memory pointer.
12101  * @destp: Destination memory pointer.
12102  * @cnt: Number of bytes to copy, walked one 32-bit word at a time.
12103  *
12104  * This function is used for copying data from a data structure with
12105  * big-endian representation to local endianness.
12106  * This function can be called with or without lock.
12107  **/
12108 void
12109 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12110 {
12111         uint32_t *src = srcp;
12112         uint32_t *dest = destp;
12113         uint32_t ldata;
12114         int i;
12115
12116         for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12117                 ldata = *src;
12118                 ldata = be32_to_cpu(ldata);
12119                 *dest = ldata;
12120                 src++;
12121                 dest++;
12122         }
12123 }
12124
12125 /**
12126  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12127  * @phba: Pointer to HBA context object.
12128  * @pring: Pointer to driver SLI ring object.
12129  * @mp: Pointer to driver buffer object.
12130  *
12131  * This function is called with no lock held.
12132  * It always returns zero after adding the buffer to the postbufq
12133  * buffer list.
12134  **/
12135 int
12136 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12137                          struct lpfc_dmabuf *mp)
12138 {
12139         /* Stick struct lpfc_dmabuf at end of postbufq so the driver
12140          * can look it up later. */
12141         spin_lock_irq(&phba->hbalock);
12142         list_add_tail(&mp->list, &pring->postbufq);
12143         pring->postbufq_cnt++;
12144         spin_unlock_irq(&phba->hbalock);
12145         return 0;
12146 }
12147
12148 /**
12149  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12150  * @phba: Pointer to HBA context object.
12151  *
12152  * When HBQ is enabled, buffers are searched based on tags. This function
12153  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
12154  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12155  * does not conflict with tags of buffer posted for unsolicited events.
12156  * The function returns the allocated tag. The function is called with
12157  * no locks held.
12158  **/
12159 uint32_t
12160 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12161 {
12162         spin_lock_irq(&phba->hbalock);
12163         phba->buffer_tag_count++;
12164         /*
12165          * Always set the QUE_BUFTAG_BIT to distinguish this tag from
12166          * a tag assigned by the HBQ.
12167          */
12168         phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12169         spin_unlock_irq(&phba->hbalock);
12170         return phba->buffer_tag_count;
12171 }
12172
12173 /**
12174  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12175  * @phba: Pointer to HBA context object.
12176  * @pring: Pointer to driver SLI ring object.
12177  * @tag: Buffer tag.
12178  *
12179  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12180  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12181  * iocb is posted to the response ring with the tag of the buffer.
12182  * This function searches the pring->postbufq list using the tag
12183  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
12184  * iocb. If the buffer is found then lpfc_dmabuf object of the
12185  * buffer is returned to the caller else NULL is returned.
12186  * This function is called with no lock held.
12187  **/
12188 struct lpfc_dmabuf *
12189 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12190                         uint32_t tag)
12191 {
12192         struct lpfc_dmabuf *mp, *next_mp;
12193         struct list_head *slp = &pring->postbufq;
12194
12195         /* Search postbufq, from the beginning, looking for a match on tag */
12196         spin_lock_irq(&phba->hbalock);
12197         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12198                 if (mp->buffer_tag == tag) {
12199                         list_del_init(&mp->list);
12200                         pring->postbufq_cnt--;
12201                         spin_unlock_irq(&phba->hbalock);
12202                         return mp;
12203                 }
12204         }
12205
12206         spin_unlock_irq(&phba->hbalock);
12207         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12208                         "0402 Cannot find virtual addr for buffer tag on "
12209                         "ring %d Data x%lx x%px x%px x%x\n",
12210                         pring->ringno, (unsigned long) tag,
12211                         slp->next, slp->prev, pring->postbufq_cnt);
12212
12213         return NULL;
12214 }
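
/*
 * Round-trip sketch for the tagged-buffer API (illustrative only; error
 * handling elided). A buffer is tagged, posted, and later reclaimed by
 * the tag carried back in the CMD_IOCB_RET_XRI64_CX response:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... issue the CMD_QUE_XRI64_CX iocb carrying mp->buffer_tag ...
 *
 *	... later, in the response path ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *	if (mp)
 *		... consume the DMA'ed data at mp->virt ...
 */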
12215
12216 /**
12217  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12218  * @phba: Pointer to HBA context object.
12219  * @pring: Pointer to driver SLI ring object.
12220  * @phys: DMA address of the buffer.
12221  *
12222  * This function searches the buffer list using the dma_address
12223  * of unsolicited event to find the driver's lpfc_dmabuf object
12224  * corresponding to the dma_address. The function returns the
12225  * lpfc_dmabuf object if a buffer is found else it returns NULL.
12226  * This function is called by the ct and els unsolicited event
12227  * handlers to get the buffer associated with the unsolicited
12228  * event.
12229  *
12230  * This function is called with no lock held.
12231  **/
12232 struct lpfc_dmabuf *
12233 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12234                          dma_addr_t phys)
12235 {
12236         struct lpfc_dmabuf *mp, *next_mp;
12237         struct list_head *slp = &pring->postbufq;
12238
12239         /* Search postbufq, from the beginning, looking for a match on phys */
12240         spin_lock_irq(&phba->hbalock);
12241         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12242                 if (mp->phys == phys) {
12243                         list_del_init(&mp->list);
12244                         pring->postbufq_cnt--;
12245                         spin_unlock_irq(&phba->hbalock);
12246                         return mp;
12247                 }
12248         }
12249
12250         spin_unlock_irq(&phba->hbalock);
12251         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12252                         "0410 Cannot find virtual addr for mapped buf on "
12253                         "ring %d Data x%llx x%px x%px x%x\n",
12254                         pring->ringno, (unsigned long long)phys,
12255                         slp->next, slp->prev, pring->postbufq_cnt);
12256         return NULL;
12257 }
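
/*
 * Typical lookup from an unsolicited-event handler (sketch; the CT/ELS
 * paths do essentially this). The DMA address is rebuilt from the IOCB's
 * 64-bit BDE and used as the search key; getPaddr() is the address
 * helper assumed from lpfc_hw.h.
 *
 *	dma_addr_t paddr = getPaddr(icmd->un.cont64[0].addrHigh,
 *				    icmd->un.cont64[0].addrLow);
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, paddr);
 *	if (!mp)
 *		return;
 */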
12258
12259 /**
12260  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12261  * @phba: Pointer to HBA context object.
12262  * @cmdiocb: Pointer to driver command iocb object.
12263  * @rspiocb: Pointer to driver response iocb object.
12264  *
12265  * This function is the completion handler for the abort iocbs for
12266  * ELS commands. This function is called from the ELS ring event
12267  * handler with no lock held. This function frees memory resources
12268  * associated with the abort iocb.
12269  **/
12270 static void
12271 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12272                         struct lpfc_iocbq *rspiocb)
12273 {
12274         u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12275         u32 ulp_word4 = get_job_word4(phba, rspiocb);
12276         u8 cmnd = get_job_cmnd(phba, cmdiocb);
12277
12278         if (ulp_status) {
12279                 /*
12280                  * Assume that the port already completed and returned, or
12281                  * will return the iocb. Just Log the message.
12282                  */
12283                 if (phba->sli_rev < LPFC_SLI_REV4) {
12284                         if (cmnd == CMD_ABORT_XRI_CX &&
12285                             ulp_status == IOSTAT_LOCAL_REJECT &&
12286                             ulp_word4 == IOERR_ABORT_REQUESTED) {
12287                                 goto release_iocb;
12288                         }
12289                 }
12290
12291                 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12292                                 "0327 Cannot abort els iocb x%px "
12293                                 "with io cmd xri %x abort tag : x%x, "
12294                                 "abort status %x abort code %x\n",
12295                                 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12296                                 (phba->sli_rev == LPFC_SLI_REV4) ?
12297                                 get_wqe_reqtag(cmdiocb) :
12298                                 cmdiocb->iocb.un.acxri.abortContextTag,
12299                                 ulp_status, ulp_word4);
12300
12301         }
12302 release_iocb:
12303         lpfc_sli_release_iocbq(phba, cmdiocb);
12304         return;
12305 }
12306
12307 /**
12308  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12309  * @phba: Pointer to HBA context object.
12310  * @cmdiocb: Pointer to driver command iocb object.
12311  * @rspiocb: Pointer to driver response iocb object.
12312  *
12313  * The function is called from SLI ring event handler with no
12314  * lock held. This function is the completion handler for ELS commands
12315  * which are aborted. The function frees memory resources used for
12316  * the aborted ELS commands.
12317  **/
12318 void
12319 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12320                      struct lpfc_iocbq *rspiocb)
12321 {
12322         struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12323         IOCB_t *irsp;
12324         LPFC_MBOXQ_t *mbox;
12325         u32 ulp_command, ulp_status, ulp_word4, iotag;
12326
12327         ulp_command = get_job_cmnd(phba, cmdiocb);
12328         ulp_status = get_job_ulpstatus(phba, rspiocb);
12329         ulp_word4 = get_job_word4(phba, rspiocb);
12330
12331         if (phba->sli_rev == LPFC_SLI_REV4) {
12332                 iotag = get_wqe_reqtag(cmdiocb);
12333         } else {
12334                 irsp = &rspiocb->iocb;
12335                 iotag = irsp->ulpIoTag;
12336
12337                 /* It is possible for a PLOGI_RJT on NPIV ports to get aborted.
12338                  * The MBX_REG_LOGIN64 mbox command is freed back to the
12339                  * mbox_mem_pool here.
12340                  */
12341                 if (cmdiocb->context_un.mbox) {
12342                         mbox = cmdiocb->context_un.mbox;
12343                         lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12344                         cmdiocb->context_un.mbox = NULL;
12345                 }
12346         }
12347
12348         /* ELS cmd tag <ulpIoTag> completes */
12349         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12350                         "0139 Ignoring ELS cmd code x%x completion Data: "
12351                         "x%x x%x x%x x%px\n",
12352                         ulp_command, ulp_status, ulp_word4, iotag,
12353                         cmdiocb->ndlp);
12354         /*
12355          * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12356          * if exchange is busy.
12357          */
12358         if (ulp_command == CMD_GEN_REQUEST64_CR)
12359                 lpfc_ct_free_iocb(phba, cmdiocb);
12360         else
12361                 lpfc_els_free_iocb(phba, cmdiocb);
12362
12363         lpfc_nlp_put(ndlp);
12364 }
12365
12366 /**
12367  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12368  * @phba: Pointer to HBA context object.
12369  * @pring: Pointer to driver SLI ring object.
12370  * @cmdiocb: Pointer to driver command iocb object.
12371  * @cmpl: completion function.
12372  *
12373  * This function issues an abort iocb for the provided command iocb. In case
12374  * of unloading, the abort iocb will not be issued to commands on the ELS
12375  * ring. Instead, the callback function is changed for those commands so
12376  * that nothing happens when they finish. This function is called with
12377  * hbalock held and no ring_lock held (SLI4). The function returns
12378  * IOCB_ABORTING when the command iocb is itself an abort request.
12379  *
12380  **/
12381 int
12382 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12383                            struct lpfc_iocbq *cmdiocb, void *cmpl)
12384 {
12385         struct lpfc_vport *vport = cmdiocb->vport;
12386         struct lpfc_iocbq *abtsiocbp;
12387         int retval = IOCB_ERROR;
12388         unsigned long iflags;
12389         struct lpfc_nodelist *ndlp = NULL;
12390         u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12391         u16 ulp_context, iotag;
12392         bool ia;
12393
12394         /*
12395          * There are certain command types we don't want to abort.  And we
12396          * don't want to abort commands that are already in the process of
12397          * being aborted.
12398          */
12399         if (ulp_command == CMD_ABORT_XRI_WQE ||
12400             ulp_command == CMD_ABORT_XRI_CN ||
12401             ulp_command == CMD_CLOSE_XRI_CN ||
12402             cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12403                 return IOCB_ABORTING;
12404
12405         if (!pring) {
12406                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12407                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12408                 else
12409                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12410                 return retval;
12411         }
12412
12413         /*
12414          * If we're unloading, don't abort iocb on the ELS ring, but change
12415          * the callback so that nothing happens when it finishes.
12416          */
12417         if ((vport->load_flag & FC_UNLOADING) &&
12418             pring->ringno == LPFC_ELS_RING) {
12419                 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12420                         cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12421                 else
12422                         cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12423                 return retval;
12424         }
12425
12426         /* issue ABTS for this IOCB based on iotag */
12427         abtsiocbp = __lpfc_sli_get_iocbq(phba);
12428         if (abtsiocbp == NULL)
12429                 return IOCB_NORESOURCE;
12430
12431         /* This signals the response to set the correct status
12432          * before calling the completion handler
12433          */
12434         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12435
12436         if (phba->sli_rev == LPFC_SLI_REV4) {
12437                 ulp_context = cmdiocb->sli4_xritag;
12438                 iotag = abtsiocbp->iotag;
12439         } else {
12440                 iotag = cmdiocb->iocb.ulpIoTag;
12441                 if (pring->ringno == LPFC_ELS_RING) {
12442                         ndlp = cmdiocb->ndlp;
12443                         ulp_context = ndlp->nlp_rpi;
12444                 } else {
12445                         ulp_context = cmdiocb->iocb.ulpContext;
12446                 }
12447         }
12448
12449         if (phba->link_state < LPFC_LINK_UP ||
12450             (phba->sli_rev == LPFC_SLI_REV4 &&
12451              phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12452             (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12453                 ia = true;
12454         else
12455                 ia = false;
12456
12457         lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12458                                 cmdiocb->iocb.ulpClass,
12459                                 LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12460
12461         abtsiocbp->vport = vport;
12462
12463         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12464         abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12465         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12466                 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12467
12468         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12469                 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12470
12471         if (cmpl)
12472                 abtsiocbp->cmd_cmpl = cmpl;
12473         else
12474                 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12476
12477         if (phba->sli_rev == LPFC_SLI_REV4) {
12478                 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12479                 if (unlikely(pring == NULL))
12480                         goto abort_iotag_exit;
12481                 /* Note: both hbalock and ring_lock need to be held here */
12482                 spin_lock_irqsave(&pring->ring_lock, iflags);
12483                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12484                         abtsiocbp, 0);
12485                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12486         } else {
12487                 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12488                         abtsiocbp, 0);
12489         }
12490
12491 abort_iotag_exit:
12492
12493         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12494                          "0339 Abort IO XRI x%x, Original iotag x%x, "
12495                          "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12496                          "retval x%x\n",
12497                          ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12498                          cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12499                          retval);
12500         if (retval) {
12501                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12502                 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12503         }
12504
12505         /*
12506          * The caller of this routine should check for IOCB_ERROR
12507          * and handle it properly.  This routine no longer removes the
12508          * iocb from the txcmplq nor calls compl in case of IOCB_ERROR.
12509          */
12510         return retval;
12511 }
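
/*
 * Caller sketch (illustrative): the teardown paths above walk a txcmplq
 * under hbalock and request an abort for each outstanding iocb. Passing
 * a NULL cmpl selects the default lpfc_sli_abort_els_cmpl handler.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 *		retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
 *						    NULL);
 *		if (retval == IOCB_ERROR)
 *			... log and continue; do not touch the iocb ...
 *	}
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */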
12512
12513 /**
12514  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12515  * @phba: pointer to lpfc HBA data structure.
12516  *
12517  * This routine will abort all pending and outstanding iocbs to an HBA.
12518  **/
12519 void
12520 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12521 {
12522         struct lpfc_sli *psli = &phba->sli;
12523         struct lpfc_sli_ring *pring;
12524         struct lpfc_queue *qp = NULL;
12525         int i;
12526
12527         if (phba->sli_rev != LPFC_SLI_REV4) {
12528                 for (i = 0; i < psli->num_rings; i++) {
12529                         pring = &psli->sli3_ring[i];
12530                         lpfc_sli_abort_iocb_ring(phba, pring);
12531                 }
12532                 return;
12533         }
12534         list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12535                 pring = qp->pring;
12536                 if (!pring)
12537                         continue;
12538                 lpfc_sli_abort_iocb_ring(phba, pring);
12539         }
12540 }
12541
12542 /**
12543  * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12544  * @iocbq: Pointer to iocb object.
12545  * @vport: Pointer to driver virtual port object.
12546  *
12547  * This function acts as an iocb filter for functions which abort FCP iocbs.
12548  *
12549  * Return values
12550  * -ENODEV, if a null iocb or vport ptr is encountered
12551  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12552  *          already marked as driver-aborted, or is an abort iocb itself
12553  * 0, passes criteria for aborting the FCP I/O iocb
12554  **/
12555 static int
12556 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12557                                      struct lpfc_vport *vport)
12558 {
12559         u8 ulp_command;
12560
12561         /* Reject a NULL iocb or an iocb that belongs to another vport */
12562         if (!iocbq || iocbq->vport != vport)
12563                 return -ENODEV;
12564
12565         /* The iocb must be for FCP IO and already on the TX cmpl queue;
12566          * it can't be premarked as driver-aborted, nor be an ABORT iocb itself
12567          */
12568         ulp_command = get_job_cmnd(vport->phba, iocbq);
12569         if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12570             !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12571             (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12572             (ulp_command == CMD_ABORT_XRI_CN ||
12573              ulp_command == CMD_CLOSE_XRI_CN ||
12574              ulp_command == CMD_ABORT_XRI_WQE))
12575                 return -EINVAL;
12576
12577         return 0;
12578 }
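
/*
 * Filter usage sketch (illustrative; the FCP abort paths follow this
 * shape): scan the iotag lookup table and only act on iocbs that pass
 * the checks above.
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
 *			continue;
 *		... match tgt/lun context, then issue the abort ...
 *	}
 */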
12579
12580 /**
12581  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12582  * @iocbq: Pointer to driver iocb object.
12583  * @vport: Pointer to driver virtual port object.
12584  * @tgt_id: SCSI ID of the target.
12585  * @lun_id: LUN ID of the scsi device.
12586  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12587  *
12588  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12589  * host.
12590  *
12591  * It will return
12592  * 0 if the filtering criteria are met for the given iocb and will return
12593  * 1 if the filtering criteria are not met.
12594  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12595  * given iocb is for the SCSI device specified by vport, tgt_id and
12596  * lun_id parameter.
12597  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12598  * given iocb is for the SCSI target specified by vport and tgt_id
12599  * parameters.
12600  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12601  * given iocb is for the SCSI host associated with the given vport.
12602  * This function is called with no locks held.
12603  **/
12604 static int
12605 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12606                            uint16_t tgt_id, uint64_t lun_id,
12607                            lpfc_ctx_cmd ctx_cmd)
12608 {
12609         struct lpfc_io_buf *lpfc_cmd;
12610         int rc = 1;
12611
12612         lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12613
12614         if (lpfc_cmd->pCmd == NULL)
12615                 return rc;
12616
12617         switch (ctx_cmd) {
12618         case LPFC_CTX_LUN:
12619                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12620                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12621                     (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12622                         rc = 0;
12623                 break;
12624         case LPFC_CTX_TGT:
12625                 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12626                     (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12627                         rc = 0;
12628                 break;
12629         case LPFC_CTX_HOST:
12630                 rc = 0;
12631                 break;
12632         default:
12633                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12634                         __func__, ctx_cmd);
12635                 break;
12636         }
12637
12638         return rc;
12639 }
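
/*
 * Illustrative sketch, not driver code: the two filters above are meant to
 * be chained in the order the abort routines below document --
 * lpfc_sli_validate_fcp_iocb_for_abort() first, then
 * lpfc_sli_validate_fcp_iocb().  The vport, tgt_id and lun_id values are
 * assumed to come from a hypothetical caller:
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
 *			continue;
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, LPFC_CTX_LUN))
 *			continue;
 *		(iocbq passed both filters; it is a candidate for abort)
 *	}
 */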
12640
12641 /**
12642  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12643  * @vport: Pointer to virtual port.
12644  * @tgt_id: SCSI ID of the target.
12645  * @lun_id: LUN ID of the scsi device.
12646  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12647  *
12648  * This function returns the number of FCP commands pending for the vport.
12649  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
12650  * commands pending on the vport associated with the SCSI device specified
12651  * by the tgt_id and lun_id parameters.
12652  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
12653  * commands pending on the vport associated with the SCSI target specified
12654  * by the tgt_id parameter.
12655  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
12656  * commands pending on the vport.
12657  * This function returns the number of iocbs which satisfy the filter.
12658  * This function is called without any lock held.
12659  **/
12660 int
12661 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12662                   lpfc_ctx_cmd ctx_cmd)
12663 {
12664         struct lpfc_hba *phba = vport->phba;
12665         struct lpfc_iocbq *iocbq;
12666         int sum, i;
12667         unsigned long iflags;
12668         u8 ulp_command;
12669
12670         spin_lock_irqsave(&phba->hbalock, iflags);
12671         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12672                 iocbq = phba->sli.iocbq_lookup[i];
12673
12674                 if (!iocbq || iocbq->vport != vport)
12675                         continue;
12676                 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12677                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12678                         continue;
12679
12680                 /* Include counting outstanding aborts */
12681                 ulp_command = get_job_cmnd(phba, iocbq);
12682                 if (ulp_command == CMD_ABORT_XRI_CN ||
12683                     ulp_command == CMD_CLOSE_XRI_CN ||
12684                     ulp_command == CMD_ABORT_XRI_WQE) {
12685                         sum++;
12686                         continue;
12687                 }
12688
12689                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12690                                                ctx_cmd) == 0)
12691                         sum++;
12692         }
12693         spin_unlock_irqrestore(&phba->hbalock, iflags);
12694
12695         return sum;
12696 }
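
/*
 * Illustrative usage sketch, under assumptions: a reset handler might poll
 * this counter while waiting for outstanding I/O to drain.  The tgt_id,
 * lun_id and deadline values below are hypothetical caller state, not part
 * of this driver:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))
 *		msleep(20);
 */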
12697
12698 /**
12699  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12700  * @phba: Pointer to HBA context object
12701  * @cmdiocb: Pointer to command iocb object.
12702  * @rspiocb: Pointer to response iocb object.
12703  *
12704  * This function is called when an aborted FCP iocb completes. This
12705  * function is called by the ring event handler with no lock held.
12706  * This function frees the iocb.
12707  **/
12708 void
12709 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12710                         struct lpfc_iocbq *rspiocb)
12711 {
12712         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12713                         "3096 ABORT_XRI_CX completing on rpi x%x "
12714                         "original iotag x%x, abort cmd iotag x%x "
12715                         "status 0x%x, reason 0x%x\n",
12716                         (phba->sli_rev == LPFC_SLI_REV4) ?
12717                         cmdiocb->sli4_xritag :
12718                         cmdiocb->iocb.un.acxri.abortContextTag,
12719                         get_job_abtsiotag(phba, cmdiocb),
12720                         cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12721                         get_job_word4(phba, rspiocb));
12722         lpfc_sli_release_iocbq(phba, cmdiocb);
12723         return;
12724 }
12725
12726 /**
12727  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12728  * @vport: Pointer to virtual port.
12729  * @tgt_id: SCSI ID of the target.
12730  * @lun_id: LUN ID of the scsi device.
12731  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12732  *
12733  * This function sends an abort command for every SCSI command
12734  * associated with the given virtual port pending on the ring, as
12735  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
12736  * lpfc_sli_validate_fcp_iocb.  The validation order before
12737  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12738  * followed by lpfc_sli_validate_fcp_iocb.
12739  *
12740  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12741  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12742  * parameters.
12743  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12744  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12745  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12746  * FCP iocbs associated with the virtual port.
12747  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12748  * lpfc_sli4_calc_ring is used.
12749  * This function returns the number of iocbs it failed to abort.
12750  * This function is called with no locks held.
12751  **/
12752 int
12753 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12754                     lpfc_ctx_cmd abort_cmd)
12755 {
12756         struct lpfc_hba *phba = vport->phba;
12757         struct lpfc_sli_ring *pring = NULL;
12758         struct lpfc_iocbq *iocbq;
12759         int errcnt = 0, ret_val = 0;
12760         unsigned long iflags;
12761         int i;
12762
12763         /* all I/Os are in the process of being flushed */
12764         if (phba->hba_flag & HBA_IOQ_FLUSH)
12765                 return errcnt;
12766
12767         for (i = 1; i <= phba->sli.last_iotag; i++) {
12768                 iocbq = phba->sli.iocbq_lookup[i];
12769
12770                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12771                         continue;
12772
12773                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12774                                                abort_cmd) != 0)
12775                         continue;
12776
12777                 spin_lock_irqsave(&phba->hbalock, iflags);
12778                 if (phba->sli_rev == LPFC_SLI_REV3) {
12779                         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12780                 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12781                         pring = lpfc_sli4_calc_ring(phba, iocbq);
12782                 }
12783                 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12784                                                      lpfc_sli_abort_fcp_cmpl);
12785                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12786                 if (ret_val != IOCB_SUCCESS)
12787                         errcnt++;
12788         }
12789
12790         return errcnt;
12791 }
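
/*
 * Illustrative usage sketch, under assumptions: aborting everything queued
 * to one SCSI target and logging how many aborts could not be issued.  For
 * LPFC_CTX_TGT the lun_id argument is not consulted, so 0 is passed here;
 * vport and tgt_id are hypothetical caller state:
 *
 *	int failed = lpfc_sli_abort_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 *	if (failed)
 *		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
 *				 "failed to issue %d aborts\n", failed);
 */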
12792
12793 /**
12794  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12795  * @vport: Pointer to virtual port.
12796  * @pring: Pointer to driver SLI ring object.
12797  * @tgt_id: SCSI ID of the target.
12798  * @lun_id: LUN ID of the scsi device.
12799  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12800  *
12801  * This function sends an abort command for every SCSI command
12802  * associated with the given virtual port pending on the ring, as
12803  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then by
12804  * lpfc_sli_validate_fcp_iocb.  The validation order before
12805  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12806  * followed by lpfc_sli_validate_fcp_iocb.
12807  *
12808  * When cmd == LPFC_CTX_LUN, the function sends abort only to the
12809  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
12810  * parameters.
12811  * When cmd == LPFC_CTX_TGT, the function sends abort only to the
12812  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
12813  * When cmd == LPFC_CTX_HOST, the function sends abort to all
12814  * FCP iocbs associated with the virtual port.
12815  * This function returns the number of iocbs it aborted.
12816  * This function is called with no locks held right after a taskmgmt
12817  * command is sent.
12818  **/
12819 int
12820 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12821                         uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12822 {
12823         struct lpfc_hba *phba = vport->phba;
12824         struct lpfc_io_buf *lpfc_cmd;
12825         struct lpfc_iocbq *abtsiocbq;
12826         struct lpfc_nodelist *ndlp = NULL;
12827         struct lpfc_iocbq *iocbq;
12828         int sum, i, ret_val;
12829         unsigned long iflags;
12830         struct lpfc_sli_ring *pring_s4 = NULL;
12831         u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12832         bool ia;
12833
12834         spin_lock_irqsave(&phba->hbalock, iflags);
12835
12836         /* all I/Os are in the process of being flushed */
12837         if (phba->hba_flag & HBA_IOQ_FLUSH) {
12838                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12839                 return 0;
12840         }
12841         sum = 0;
12842
12843         for (i = 1; i <= phba->sli.last_iotag; i++) {
12844                 iocbq = phba->sli.iocbq_lookup[i];
12845
12846                 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12847                         continue;
12848
12849                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12850                                                cmd) != 0)
12851                         continue;
12852
12853                 /* Guard against IO completion being called at same time */
12854                 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12855                 spin_lock(&lpfc_cmd->buf_lock);
12856
12857                 if (!lpfc_cmd->pCmd) {
12858                         spin_unlock(&lpfc_cmd->buf_lock);
12859                         continue;
12860                 }
12861
12862                 if (phba->sli_rev == LPFC_SLI_REV4) {
12863                         pring_s4 =
12864                             phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12865                         if (!pring_s4) {
12866                                 spin_unlock(&lpfc_cmd->buf_lock);
12867                                 continue;
12868                         }
12869                         /* Note: both hbalock and ring_lock must be held here */
12870                         spin_lock(&pring_s4->ring_lock);
12871                 }
12872
12873                 /*
12874                  * If the iocbq is already being aborted, don't take a second
12875                  * action, but do count it.
12876                  */
12877                 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12878                     !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12879                         if (phba->sli_rev == LPFC_SLI_REV4)
12880                                 spin_unlock(&pring_s4->ring_lock);
12881                         spin_unlock(&lpfc_cmd->buf_lock);
12882                         continue;
12883                 }
12884
12885                 /* issue ABTS for this IOCB based on iotag */
12886                 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12887                 if (!abtsiocbq) {
12888                         if (phba->sli_rev == LPFC_SLI_REV4)
12889                                 spin_unlock(&pring_s4->ring_lock);
12890                         spin_unlock(&lpfc_cmd->buf_lock);
12891                         continue;
12892                 }
12893
12894                 if (phba->sli_rev == LPFC_SLI_REV4) {
12895                         iotag = abtsiocbq->iotag;
12896                         ulp_context = iocbq->sli4_xritag;
12897                         cqid = lpfc_cmd->hdwq->io_cq_map;
12898                 } else {
12899                         iotag = iocbq->iocb.ulpIoTag;
12900                         if (pring->ringno == LPFC_ELS_RING) {
12901                                 ndlp = iocbq->ndlp;
12902                                 ulp_context = ndlp->nlp_rpi;
12903                         } else {
12904                                 ulp_context = iocbq->iocb.ulpContext;
12905                         }
12906                 }
12907
12908                 ndlp = lpfc_cmd->rdata->pnode;
12909
12910                 if (lpfc_is_link_up(phba) &&
12911                     (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12912                     !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12913                         ia = false;
12914                 else
12915                         ia = true;
12916
12917                 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12918                                         iocbq->iocb.ulpClass, cqid,
12919                                         ia, false);
12920
12921                 abtsiocbq->vport = vport;
12922
12923                 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
12924                 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12925                 if (iocbq->cmd_flag & LPFC_IO_FCP)
12926                         abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12927                 if (iocbq->cmd_flag & LPFC_IO_FOF)
12928                         abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12929
12930                 /* Setup callback routine and issue the command. */
12931                 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12932
12933                 /*
12934                  * Indicate the IO is being aborted by the driver and set
12935                  * the caller's flag into the aborted IO.
12936                  */
12937                 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12938
12939                 if (phba->sli_rev == LPFC_SLI_REV4) {
12940                         ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12941                                                         abtsiocbq, 0);
12942                         spin_unlock(&pring_s4->ring_lock);
12943                 } else {
12944                         ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12945                                                         abtsiocbq, 0);
12946                 }
12947
12948                 spin_unlock(&lpfc_cmd->buf_lock);
12949
12950                 if (ret_val == IOCB_ERROR)
12951                         __lpfc_sli_release_iocbq(phba, abtsiocbq);
12952                 else
12953                         sum++;
12954         }
12955         spin_unlock_irqrestore(&phba->hbalock, iflags);
12956         return sum;
12957 }
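
/*
 * Illustrative usage sketch, under assumptions: right after a LUN-reset
 * task management command completes, a caller could sweep the remaining
 * I/Os for that lun.  Note that on SLI4 the WQ ring actually used is
 * derived per-iocb from hba_wqidx rather than from the pring argument:
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport,
 *				      &phba->sli.sli3_ring[LPFC_FCP_RING],
 *				      tgt_id, lun_id, LPFC_CTX_LUN);
 */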
12958
12959 /**
12960  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12961  * @phba: Pointer to HBA context object.
12962  * @cmdiocbq: Pointer to command iocb.
12963  * @rspiocbq: Pointer to response iocb.
12964  *
12965  * This function is the completion handler for iocbs issued using
12966  * lpfc_sli_issue_iocb_wait function. This function is called by the
12967  * ring event handler function without any lock held. This function
12968  * can be called from both worker thread context and interrupt
12969  * context. This function also can be called from other thread which
12970  * cleans up the SLI layer objects.
12971  * This function copies the contents of the response iocb to the
12972  * response iocb memory object provided by the caller of
12973  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
12974  * sleeps waiting for the iocb completion.
12975  **/
12976 static void
12977 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12978                         struct lpfc_iocbq *cmdiocbq,
12979                         struct lpfc_iocbq *rspiocbq)
12980 {
12981         wait_queue_head_t *pdone_q;
12982         unsigned long iflags;
12983         struct lpfc_io_buf *lpfc_cmd;
12984         size_t offset = offsetof(struct lpfc_iocbq, wqe);
12985
12986         spin_lock_irqsave(&phba->hbalock, iflags);
12987         if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12988
12989                 /*
12990                  * A time out has occurred for the iocb.  If a time out
12991                  * completion handler has been supplied, call it.  Otherwise,
12992                  * just free the iocbq.
12993                  */
12994
12995                 spin_unlock_irqrestore(&phba->hbalock, iflags);
12996                 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12997                 cmdiocbq->wait_cmd_cmpl = NULL;
12998                 if (cmdiocbq->cmd_cmpl)
12999                         cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13000                 else
13001                         lpfc_sli_release_iocbq(phba, cmdiocbq);
13002                 return;
13003         }
13004
13005         /* Copy the contents of the local rspiocb into the caller's buffer. */
13006         cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13007         if (cmdiocbq->rsp_iocb && rspiocbq)
13008                 memcpy((char *)cmdiocbq->rsp_iocb + offset,
13009                        (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13010
13011         /* Set the exchange busy flag for task management commands */
13012         if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13013             !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13014                 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13015                                         cur_iocbq);
13016                 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13017                         lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13018                 else
13019                         lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13020         }
13021
13022         pdone_q = cmdiocbq->context_un.wait_queue;
13023         if (pdone_q)
13024                 wake_up(pdone_q);
13025         spin_unlock_irqrestore(&phba->hbalock, iflags);
13026         return;
13027 }
13028
13029 /**
13030  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13031  * @phba: Pointer to HBA context object.
13032  * @piocbq: Pointer to command iocb.
13033  * @flag: Flag to test.
13034  *
13035  * This routine grabs the hbalock and then tests the cmd_flag to
13036  * see if the passed in flag is set.
13037  * Returns:
13038  * 1 if flag is set.
13039  * 0 if flag is not set.
13040  **/
13041 static int
13042 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13043                  struct lpfc_iocbq *piocbq, uint32_t flag)
13044 {
13045         unsigned long iflags;
13046         int ret;
13047
13048         spin_lock_irqsave(&phba->hbalock, iflags);
13049         ret = piocbq->cmd_flag & flag;
13050         spin_unlock_irqrestore(&phba->hbalock, iflags);
13051         return ret;
13052
13053 }
13054
13055 /**
13056  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13057  * @phba: Pointer to HBA context object.
13058  * @ring_number: Ring number
13059  * @piocb: Pointer to command iocb.
13060  * @prspiocbq: Pointer to response iocb.
13061  * @timeout: Timeout in number of seconds.
13062  *
13063  * This function issues the iocb to firmware and waits for the
13064  * iocb to complete. The cmd_cmpl field of the shall be used
13065  * iocb to complete. The cmd_cmpl field of the iocb shall be used
13066  * function shall free the iocbq structure.  If more clean up is
13067  * needed, the caller is expected to provide a completion function
13068  * that will provide the needed clean up.  If the iocb command is
13069  * not completed within timeout seconds, the function will either
13070  * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13071  * completion function set in the cmd_cmpl field and then return
13072  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
13073  * resources if this function returns IOCB_TIMEDOUT.
13074  * The function waits for the iocb completion using a
13075  * non-interruptible wait.
13076  * This function will sleep while waiting for iocb completion.
13077  * So, this function should not be called from any context which
13078  * does not allow sleeping. For the same reason, this function
13079  * cannot be called with interrupts disabled.
13080  * This function assumes that the iocb completions occur while
13081  * this function sleeps. So, this function cannot be called from
13082  * the thread which processes iocb completions for this ring.
13083  * This function clears the cmd_flag of the iocb object before
13084  * issuing the iocb and the iocb completion handler sets this
13085  * flag and wakes this thread when the iocb completes.
13086  * The contents of the response iocb will be copied to prspiocbq
13087  * by the completion handler when the command completes.
13088  * This function returns IOCB_SUCCESS when success.
13089  * This function is called with no lock held.
13090  **/
13091 int
13092 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13093                          uint32_t ring_number,
13094                          struct lpfc_iocbq *piocb,
13095                          struct lpfc_iocbq *prspiocbq,
13096                          uint32_t timeout)
13097 {
13098         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13099         long timeleft, timeout_req = 0;
13100         int retval = IOCB_SUCCESS;
13101         uint32_t creg_val;
13102         struct lpfc_iocbq *iocb;
13103         int txq_cnt = 0;
13104         int txcmplq_cnt = 0;
13105         struct lpfc_sli_ring *pring;
13106         unsigned long iflags;
13107         bool iocb_completed = true;
13108
13109         if (phba->sli_rev >= LPFC_SLI_REV4) {
13110                 lpfc_sli_prep_wqe(phba, piocb);
13111
13112                 pring = lpfc_sli4_calc_ring(phba, piocb);
13113         } else
13114                 pring = &phba->sli.sli3_ring[ring_number];
13115         /*
13116          * If the caller has provided a response iocbq buffer, rsp_iocb must
13117          * still be NULL; otherwise it is an error.
13118          */
13119         if (prspiocbq) {
13120                 if (piocb->rsp_iocb)
13121                         return IOCB_ERROR;
13122                 piocb->rsp_iocb = prspiocbq;
13123         }
13124
13125         piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13126         piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13127         piocb->context_un.wait_queue = &done_q;
13128         piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13129
13130         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13131                 if (lpfc_readl(phba->HCregaddr, &creg_val))
13132                         return IOCB_ERROR;
13133                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13134                 writel(creg_val, phba->HCregaddr);
13135                 readl(phba->HCregaddr); /* flush */
13136         }
13137
13138         retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13139                                      SLI_IOCB_RET_IOCB);
13140         if (retval == IOCB_SUCCESS) {
13141                 timeout_req = msecs_to_jiffies(timeout * 1000);
13142                 timeleft = wait_event_timeout(done_q,
13143                                 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13144                                 timeout_req);
13145                 spin_lock_irqsave(&phba->hbalock, iflags);
13146                 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13147
13148                         /*
13149                          * IOCB timed out.  Inform the wake iocb wait
13150                          * completion function and set local status
13151                          */
13152
13153                         iocb_completed = false;
13154                         piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13155                 }
13156                 spin_unlock_irqrestore(&phba->hbalock, iflags);
13157                 if (iocb_completed) {
13158                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13159                                         "0331 IOCB wake signaled\n");
13160                         /* Note: we are not indicating if the IOCB has a success
13161                          * status or not - that's for the caller to check.
13162                          * IOCB_SUCCESS means only that the command was sent and
13163                          * completed, not that it completed successfully.
13164                          */
13165                 } else if (timeleft == 0) {
13166                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13167                                         "0338 IOCB wait timeout error - no "
13168                                         "wake response Data x%x\n", timeout);
13169                         retval = IOCB_TIMEDOUT;
13170                 } else {
13171                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13172                                         "0330 IOCB wake NOT set, "
13173                                         "Data x%x x%lx\n",
13174                                         timeout, (timeleft / jiffies));
13175                         retval = IOCB_TIMEDOUT;
13176                 }
13177         } else if (retval == IOCB_BUSY) {
13178                 if (phba->cfg_log_verbose & LOG_SLI) {
13179                         list_for_each_entry(iocb, &pring->txq, list) {
13180                                 txq_cnt++;
13181                         }
13182                         list_for_each_entry(iocb, &pring->txcmplq, list) {
13183                                 txcmplq_cnt++;
13184                         }
13185                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13186                                 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13187                                 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13188                 }
13189                 return retval;
13190         } else {
13191                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13192                                 "0332 IOCB wait issue failed, Data x%x\n",
13193                                 retval);
13194                 retval = IOCB_ERROR;
13195         }
13196
13197         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13198                 if (lpfc_readl(phba->HCregaddr, &creg_val))
13199                         return IOCB_ERROR;
13200                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13201                 writel(creg_val, phba->HCregaddr);
13202                 readl(phba->HCregaddr); /* flush */
13203         }
13204
13205         if (prspiocbq)
13206                 piocb->rsp_iocb = NULL;
13207
13208         piocb->context_un.wait_queue = NULL;
13209         piocb->cmd_cmpl = NULL;
13210         return retval;
13211 }
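
/*
 * Illustrative usage sketch, under assumptions: issuing an ELS-ring iocb
 * synchronously with a 30 second timeout while honouring the ownership
 * rule documented above -- on IOCB_TIMEDOUT the iocbq must not be freed
 * by the caller, since the completion handler will release it later:
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_TIMEDOUT)
 *		return -ETIMEDOUT;
 *	if (rc != IOCB_SUCCESS)
 *		(handle the error; the caller still owns cmdiocbq)
 */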
13212
13213 /**
13214  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13215  * @phba: Pointer to HBA context object.
13216  * @pmboxq: Pointer to driver mailbox object.
13217  * @timeout: Timeout in number of seconds.
13218  *
13219  * This function issues the mailbox to firmware and waits for the
13220  * mailbox command to complete. If the mailbox command is not
13221  * completed within timeout seconds, it returns MBX_TIMEOUT.
13222  * The function waits for the mailbox completion using an
13223  * interruptible wait. If the thread is woken up due to a
13224  * signal, MBX_TIMEOUT error is returned to the caller. Caller
13225  * should not free the mailbox resources, if this function returns
13226  * MBX_TIMEOUT.
13227  * This function will sleep while waiting for mailbox completion.
13228  * So, this function should not be called from any context which
13229  * does not allow sleeping. For the same reason, this function
13230  * cannot be called with interrupts disabled.
13231  * This function assumes that the mailbox completion occurs while
13232  * this function sleeps. So, this function cannot be called from
13233  * the worker thread which processes mailbox completion.
13234  * This function is called in the context of HBA management
13235  * applications.
13236  * This function returns MBX_SUCCESS when successful.
13237  * This function is called with no lock held.
13238  **/
13239 int
13240 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13241                          uint32_t timeout)
13242 {
13243         struct completion mbox_done;
13244         int retval;
13245         unsigned long flag;
13246
13247         pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13248         /* setup wake call as mailbox completion callback */
13249         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13250
13251         /* setup context3 field to pass the completion pointer to the wake function */
13252         init_completion(&mbox_done);
13253         pmboxq->context3 = &mbox_done;
13254         /* now issue the command */
13255         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13256         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13257                 wait_for_completion_timeout(&mbox_done,
13258                                             msecs_to_jiffies(timeout * 1000));
13259
13260                 spin_lock_irqsave(&phba->hbalock, flag);
13261                 pmboxq->context3 = NULL;
13262                 /*
13263                  * if LPFC_MBX_WAKE flag is set the mailbox is completed
13264                  * else do not free the resources.
13265                  */
13266                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13267                         retval = MBX_SUCCESS;
13268                 } else {
13269                         retval = MBX_TIMEOUT;
13270                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13271                 }
13272                 spin_unlock_irqrestore(&phba->hbalock, flag);
13273         }
13274         return retval;
13275 }
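
/*
 * Illustrative usage sketch, under assumptions: a typical caller allocates
 * a mailbox from the mempool, builds the command (lpfc_read_lnk_stat() is
 * used purely as an example), waits, and frees the mailbox only on success
 * -- on MBX_TIMEOUT ownership passes to lpfc_sli_def_mbox_cmpl as set
 * above:
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmboxq)
 *		return -ENOMEM;
 *	lpfc_read_lnk_stat(phba, pmboxq);
 *	pmboxq->vport = vport;
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc == MBX_SUCCESS)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */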
13276
13277 /**
13278  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13279  * @phba: Pointer to HBA context.
13280  * @mbx_action: Mailbox shutdown options.
13281  *
13282  * This function is called to shutdown the driver's mailbox sub-system.
13283  * It first marks the mailbox sub-system as blocked to prevent
13284  * asynchronous mailbox commands from being issued off the pending mailbox
13285  * command queue. If the mailbox command sub-system shutdown is due to
13286  * HBA error conditions such as EEH or ERATT, this routine shall invoke
13287  * the mailbox sub-system flush routine to forcefully bring down the
13288  * mailbox sub-system. Otherwise, if it is due to normal condition (such
13289  * as with offline or HBA function reset), this routine will wait for the
13290  * outstanding mailbox command to complete before invoking the mailbox
13291  * sub-system flush routine to gracefully bring down mailbox sub-system.
13292  **/
13293 void
13294 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13295 {
13296         struct lpfc_sli *psli = &phba->sli;
13297         unsigned long timeout;
13298
13299         if (mbx_action == LPFC_MBX_NO_WAIT) {
13300                 /* delay 100ms for port state */
13301                 msleep(100);
13302                 lpfc_sli_mbox_sys_flush(phba);
13303                 return;
13304         }
13305         timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13306
13307         /* Disable softirqs, including timers from obtaining phba->hbalock */
13308         local_bh_disable();
13309
13310         spin_lock_irq(&phba->hbalock);
13311         psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13312
13313         if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13314                 /* Determine how long we might wait for the active mailbox
13315                  * command to be gracefully completed by firmware.
13316                  */
13317                 if (phba->sli.mbox_active)
13318                         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13319                                                 phba->sli.mbox_active) *
13320                                                 1000) + jiffies;
13321                 spin_unlock_irq(&phba->hbalock);
13322
13323                 /* Enable softirqs again, done with phba->hbalock */
13324                 local_bh_enable();
13325
13326                 while (phba->sli.mbox_active) {
13327                         /* Check active mailbox complete status every 2ms */
13328                         msleep(2);
13329                         if (time_after(jiffies, timeout))
13330                                 /* Timeout, let the mailbox flush routine
13331                                  * forcefully release the active mailbox command
13332                                  */
13333                                 break;
13334                 }
13335         } else {
13336                 spin_unlock_irq(&phba->hbalock);
13337
13338                 /* Enable softirqs again, done with phba->hbalock */
13339                 local_bh_enable();
13340         }
13341
13342         lpfc_sli_mbox_sys_flush(phba);
13343 }
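
/*
 * Illustrative note, under assumptions: graceful teardown paths (offline,
 * function reset) would pass LPFC_MBX_WAIT, while error paths such as EEH
 * or ERATT handling would pass LPFC_MBX_NO_WAIT:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 */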
13344
13345 /**
13346  * lpfc_sli_eratt_read - read sli-3 error attention events
13347  * @phba: Pointer to HBA context.
13348  *
13349  * This function is called to read the SLI3 device error attention registers
13350  * for possible error attention events. The caller must hold the hostlock
13351  * with spin_lock_irq().
13352  *
13353  * This function returns 1 when there is Error Attention in the Host Attention
13354  * Register and returns 0 otherwise.
13355  **/
13356 static int
13357 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13358 {
13359         uint32_t ha_copy;
13360
13361         /* Read chip Host Attention (HA) register */
13362         if (lpfc_readl(phba->HAregaddr, &ha_copy))
13363                 goto unplug_err;
13364
13365         if (ha_copy & HA_ERATT) {
13366                 /* Read host status register to retrieve error event */
13367                 if (lpfc_sli_read_hs(phba))
13368                         goto unplug_err;
13369
13370                 /* Check if a deferred error condition is active */
13371                 if ((HS_FFER1 & phba->work_hs) &&
13372                     ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13373                       HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13374                         phba->hba_flag |= DEFER_ERATT;
13375                         /* Clear all interrupt enable conditions */
13376                         writel(0, phba->HCregaddr);
13377                         readl(phba->HCregaddr);
13378                 }
13379
13380                 /* Set the driver HA work bitmap */
13381                 phba->work_ha |= HA_ERATT;
13382                 /* Indicate polling handles this ERATT */
13383                 phba->hba_flag |= HBA_ERATT_HANDLED;
13384                 return 1;
13385         }
13386         return 0;
13387
13388 unplug_err:
13389         /* Set the driver HS work bitmap */
13390         phba->work_hs |= UNPLUG_ERR;
13391         /* Set the driver HA work bitmap */
13392         phba->work_ha |= HA_ERATT;
13393         /* Indicate polling handles this ERATT */
13394         phba->hba_flag |= HBA_ERATT_HANDLED;
13395         return 1;
13396 }
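
/*
 * Worked example for the deferred-error test above: the condition is
 * deferred only when HS_FFER1 is set together with at least one of
 * HS_FFER2..HS_FFER8.  So work_hs == HS_FFER1 alone is treated as a plain
 * error attention, while work_hs == (HS_FFER1 | HS_FFER3) sets DEFER_ERATT
 * and clears all interrupt enable conditions.
 */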
13397
13398 /**
13399  * lpfc_sli4_eratt_read - read sli-4 error attention events
13400  * @phba: Pointer to HBA context.
13401  *
13402  * This function is called to read the SLI4 device error attention registers
13403  * for possible error attention events. The caller must hold the hostlock
13404  * with spin_lock_irq().
13405  *
13406  * This function returns 1 when there is Error Attention in the Host Attention
13407  * Register and returns 0 otherwise.
13408  **/
13409 static int
13410 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13411 {
13412         uint32_t uerr_sta_hi, uerr_sta_lo;
13413         uint32_t if_type, portsmphr;
13414         struct lpfc_register portstat_reg;
13415         u32 logmask;
13416
13417         /*
13418          * For now, use the SLI4 device internal unrecoverable error
13419          * registers for error attention. This can be changed later.
13420          */
13421         if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13422         switch (if_type) {
13423         case LPFC_SLI_INTF_IF_TYPE_0:
13424                 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13425                         &uerr_sta_lo) ||
13426                         lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13427                         &uerr_sta_hi)) {
13428                         phba->work_hs |= UNPLUG_ERR;
13429                         phba->work_ha |= HA_ERATT;
13430                         phba->hba_flag |= HBA_ERATT_HANDLED;
13431                         return 1;
13432                 }
13433                 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13434                     (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13435                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13436                                         "1423 HBA Unrecoverable error: "
13437                                         "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13438                                         "ue_mask_lo_reg=0x%x, "
13439                                         "ue_mask_hi_reg=0x%x\n",
13440                                         uerr_sta_lo, uerr_sta_hi,
13441                                         phba->sli4_hba.ue_mask_lo,
13442                                         phba->sli4_hba.ue_mask_hi);
13443                         phba->work_status[0] = uerr_sta_lo;
13444                         phba->work_status[1] = uerr_sta_hi;
13445                         phba->work_ha |= HA_ERATT;
13446                         phba->hba_flag |= HBA_ERATT_HANDLED;
13447                         return 1;
13448                 }
13449                 break;
13450         case LPFC_SLI_INTF_IF_TYPE_2:
13451         case LPFC_SLI_INTF_IF_TYPE_6:
13452                 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13453                         &portstat_reg.word0) ||
13454                         lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13455                         &portsmphr)){
13456                         phba->work_hs |= UNPLUG_ERR;
13457                         phba->work_ha |= HA_ERATT;
13458                         phba->hba_flag |= HBA_ERATT_HANDLED;
13459                         return 1;
13460                 }
13461                 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13462                         phba->work_status[0] =
13463                                 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13464                         phba->work_status[1] =
13465                                 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13466                         logmask = LOG_TRACE_EVENT;
13467                         if (phba->work_status[0] ==
13468                                 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13469                             phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13470                                 logmask = LOG_SLI;
13471                         lpfc_printf_log(phba, KERN_ERR, logmask,
13472                                         "2885 Port Status Event: "
13473                                         "port status reg 0x%x, "
13474                                         "port smphr reg 0x%x, "
13475                                         "error 1=0x%x, error 2=0x%x\n",
13476                                         portstat_reg.word0,
13477                                         portsmphr,
13478                                         phba->work_status[0],
13479                                         phba->work_status[1]);
13480                         phba->work_ha |= HA_ERATT;
13481                         phba->hba_flag |= HBA_ERATT_HANDLED;
13482                         return 1;
13483                 }
13484                 break;
13485         case LPFC_SLI_INTF_IF_TYPE_1:
13486         default:
13487                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13488                                 "2886 HBA Error Attention on unsupported "
13489                                 "if type %d.", if_type);
13490                 return 1;
13491         }
13492
13493         return 0;
13494 }
13495
13496 /**
13497  * lpfc_sli_check_eratt - check error attention events
13498  * @phba: Pointer to HBA context.
13499  *
13500  * This function is called from timer soft interrupt context to check HBA's
13501  * error attention register bit for error attention events.
13502  *
13503  * This function returns 1 when there is Error Attention in the Host Attention
13504  * Register and returns 0 otherwise.
13505  **/
13506 int
13507 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13508 {
13509         uint32_t ha_copy;
13510
13511         /* If somebody is waiting to handle an eratt, don't process it
13512          * here. The brdkill function will do this.
13513          */
13514         if (phba->link_flag & LS_IGNORE_ERATT)
13515                 return 0;
13516
13517         /* Check if interrupt handler handles this ERATT */
13518         spin_lock_irq(&phba->hbalock);
13519         if (phba->hba_flag & HBA_ERATT_HANDLED) {
13520                 /* Interrupt handler has handled ERATT */
13521                 spin_unlock_irq(&phba->hbalock);
13522                 return 0;
13523         }
13524
13525         /*
13526          * If there is deferred error attention, do not check for error
13527          * attention
13528          */
13529         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13530                 spin_unlock_irq(&phba->hbalock);
13531                 return 0;
13532         }
13533
13534         /* If PCI channel is offline, don't process it */
13535         if (unlikely(pci_channel_offline(phba->pcidev))) {
13536                 spin_unlock_irq(&phba->hbalock);
13537                 return 0;
13538         }
13539
13540         switch (phba->sli_rev) {
13541         case LPFC_SLI_REV2:
13542         case LPFC_SLI_REV3:
13543                 /* Read chip Host Attention (HA) register */
13544                 ha_copy = lpfc_sli_eratt_read(phba);
13545                 break;
13546         case LPFC_SLI_REV4:
13547                 /* Read device Unrecoverable Error (UERR) registers */
13548                 ha_copy = lpfc_sli4_eratt_read(phba);
13549                 break;
13550         default:
13551                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13552                                 "0299 Invalid SLI revision (%d)\n",
13553                                 phba->sli_rev);
13554                 ha_copy = 0;
13555                 break;
13556         }
13557         spin_unlock_irq(&phba->hbalock);
13558
13559         return ha_copy;
13560 }
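
/*
 * Illustrative usage sketch, under assumptions: a timer-driven poller
 * would call this routine and, on a reported event, wake the worker
 * thread to process phba->work_ha:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */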
13561
13562 /**
13563  * lpfc_intr_state_check - Check device state for interrupt handling
13564  * @phba: Pointer to HBA context.
13565  *
13566  * This inline routine checks whether a device or its PCI slot is in a state
13567  * in which the interrupt should be handled.
13568  *
13569  * This function returns 0 if the device or the PCI slot is in a state in
13570  * which the interrupt should be handled, otherwise -EIO.
13571  */
13572 static inline int
13573 lpfc_intr_state_check(struct lpfc_hba *phba)
13574 {
13575         /* If the pci channel is offline, ignore all the interrupts */
13576         if (unlikely(pci_channel_offline(phba->pcidev)))
13577                 return -EIO;
13578
13579         /* Update device level interrupt statistics */
13580         phba->sli.slistat.sli_intr++;
13581
13582         /* Ignore all interrupts during initialization. */
13583         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13584                 return -EIO;
13585
13586         return 0;
13587 }
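
/*
 * Illustrative usage, under assumptions: interrupt handlers gate on this
 * check before touching hardware, exactly as the MSI-X path below does:
 *
 *	if (lpfc_intr_state_check(phba))
 *		return IRQ_NONE;
 */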
13588
13589 /**
13590  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13591  * @irq: Interrupt number.
13592  * @dev_id: The device context pointer.
13593  *
13594  * This function is directly called from the PCI layer as an interrupt
13595  * service routine when device with SLI-3 interface spec is enabled with
13596  * MSI-X multi-message interrupt mode and there are slow-path events in
13597  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13598  * interrupt mode, this function is called as part of the device-level
13599  * interrupt handler. When the PCI slot is in error recovery or the HBA
13600  * is undergoing initialization, the interrupt handler will not process
13601  * the interrupt. The link attention and ELS ring attention events are
13602  * handled by the worker thread. The interrupt handler signals the worker
13603  * thread and returns for these events. This function is called without
13604  * any lock held. It gets the hbalock to access and update SLI data
13605  * structures.
13606  *
13607  * This function returns IRQ_HANDLED when interrupt is handled else it
13608  * returns IRQ_NONE.
13609  **/
13610 irqreturn_t
13611 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13612 {
13613         struct lpfc_hba  *phba;
13614         uint32_t ha_copy, hc_copy;
13615         uint32_t work_ha_copy;
13616         unsigned long status;
13617         unsigned long iflag;
13618         uint32_t control;
13619
13620         MAILBOX_t *mbox, *pmbox;
13621         struct lpfc_vport *vport;
13622         struct lpfc_nodelist *ndlp;
13623         struct lpfc_dmabuf *mp;
13624         LPFC_MBOXQ_t *pmb;
13625         int rc;
13626
13627         /*
13628          * Get the driver's phba structure from the dev_id and
13629          * assume the HBA is not interrupting.
13630          */
13631         phba = (struct lpfc_hba *)dev_id;
13632
13633         if (unlikely(!phba))
13634                 return IRQ_NONE;
13635
13636         /*
13637          * Extra attention is needed when this function is invoked as an
13638          * individual interrupt handler in MSI-X multi-message interrupt mode.
13639          */
13640         if (phba->intr_type == MSIX) {
13641                 /* Check device state for handling interrupt */
13642                 if (lpfc_intr_state_check(phba))
13643                         return IRQ_NONE;
13644                 /* Need to read HA REG for slow-path events */
13645                 spin_lock_irqsave(&phba->hbalock, iflag);
13646                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13647                         goto unplug_error;
13648                 /* If somebody is waiting to handle an eratt don't process it
13649                  * here. The brdkill function will do this.
13650                  */
13651                 if (phba->link_flag & LS_IGNORE_ERATT)
13652                         ha_copy &= ~HA_ERATT;
13653                 /* Check the need for handling ERATT in interrupt handler */
13654                 if (ha_copy & HA_ERATT) {
13655                         if (phba->hba_flag & HBA_ERATT_HANDLED)
13656                                 /* ERATT polling has handled ERATT */
13657                                 ha_copy &= ~HA_ERATT;
13658                         else
13659                                 /* Indicate interrupt handler handles ERATT */
13660                                 phba->hba_flag |= HBA_ERATT_HANDLED;
13661                 }
13662
13663                 /*
13664                  * If there is deferred error attention, do not check for any
13665                  * interrupt.
13666                  */
13667                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13668                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13669                         return IRQ_NONE;
13670                 }
13671
13672                 /* Clear up only attention source related to slow-path */
13673                 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13674                         goto unplug_error;
13675
13676                 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13677                         HC_LAINT_ENA | HC_ERINT_ENA),
13678                         phba->HCregaddr);
13679                 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13680                         phba->HAregaddr);
13681                 writel(hc_copy, phba->HCregaddr);
13682                 readl(phba->HAregaddr); /* flush */
13683                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13684         } else
13685                 ha_copy = phba->ha_copy;
13686
13687         work_ha_copy = ha_copy & phba->work_ha_mask;
13688
13689         if (work_ha_copy) {
13690                 if (work_ha_copy & HA_LATT) {
13691                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13692                                 /*
13693                                  * Turn off Link Attention interrupts
13694                                  * until CLEAR_LA done
13695                                  */
13696                                 spin_lock_irqsave(&phba->hbalock, iflag);
13697                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13698                                 if (lpfc_readl(phba->HCregaddr, &control))
13699                                         goto unplug_error;
13700                                 control &= ~HC_LAINT_ENA;
13701                                 writel(control, phba->HCregaddr);
13702                                 readl(phba->HCregaddr); /* flush */
13703                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13704                         }
13705                         else
13706                                 work_ha_copy &= ~HA_LATT;
13707                 }
13708
13709                 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13710                         /*
13711                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13712                          * the only slow ring.
13713                          */
13714                         status = (work_ha_copy &
13715                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
13716                         status >>= (4*LPFC_ELS_RING);
13717                         if (status & HA_RXMASK) {
13718                                 spin_lock_irqsave(&phba->hbalock, iflag);
13719                                 if (lpfc_readl(phba->HCregaddr, &control))
13720                                         goto unplug_error;
13721
13722                                 lpfc_debugfs_slow_ring_trc(phba,
13723                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13724                                 control, status,
13725                                 (uint32_t)phba->sli.slistat.sli_intr);
13726
13727                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13728                                         lpfc_debugfs_slow_ring_trc(phba,
13729                                                 "ISR Disable ring:"
13730                                                 "pwork:x%x hawork:x%x wait:x%x",
13731                                                 phba->work_ha, work_ha_copy,
13732                                                 (uint32_t)((unsigned long)
13733                                                 &phba->work_waitq));
13734
13735                                         control &=
13736                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
13737                                         writel(control, phba->HCregaddr);
13738                                         readl(phba->HCregaddr); /* flush */
13739                                 }
13740                                 else {
13741                                         lpfc_debugfs_slow_ring_trc(phba,
13742                                                 "ISR slow ring:   pwork:"
13743                                                 "x%x hawork:x%x wait:x%x",
13744                                                 phba->work_ha, work_ha_copy,
13745                                                 (uint32_t)((unsigned long)
13746                                                 &phba->work_waitq));
13747                                 }
13748                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13749                         }
13750                 }
13751                 spin_lock_irqsave(&phba->hbalock, iflag);
13752                 if (work_ha_copy & HA_ERATT) {
13753                         if (lpfc_sli_read_hs(phba))
13754                                 goto unplug_error;
13755                         /*
13756                          * Check if a deferred error condition
13757                          * is active
13758                          */
13759                         if ((HS_FFER1 & phba->work_hs) &&
13760                                 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13761                                   HS_FFER6 | HS_FFER7 | HS_FFER8) &
13762                                   phba->work_hs)) {
13763                                 phba->hba_flag |= DEFER_ERATT;
13764                                 /* Clear all interrupt enable conditions */
13765                                 writel(0, phba->HCregaddr);
13766                                 readl(phba->HCregaddr);
13767                         }
13768                 }
13769
13770                 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13771                         pmb = phba->sli.mbox_active;
13772                         pmbox = &pmb->u.mb;
13773                         mbox = phba->mbox;
13774                         vport = pmb->vport;
13775
13776                         /* First check out the status word */
13777                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13778                         if (pmbox->mbxOwner != OWN_HOST) {
13779                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13780                                 /*
13781                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
13782                                  * mbxStatus <status>
13783                                  */
13784                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13785                                                 "(%d):0304 Stray Mailbox "
13786                                                 "Interrupt mbxCommand x%x "
13787                                                 "mbxStatus x%x\n",
13788                                                 (vport ? vport->vpi : 0),
13789                                                 pmbox->mbxCommand,
13790                                                 pmbox->mbxStatus);
13791                                 /* clear mailbox attention bit */
13792                                 work_ha_copy &= ~HA_MBATT;
13793                         } else {
13794                                 phba->sli.mbox_active = NULL;
13795                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13796                                 phba->last_completion_time = jiffies;
13797                                 del_timer(&phba->sli.mbox_tmo);
13798                                 if (pmb->mbox_cmpl) {
13799                                         lpfc_sli_pcimem_bcopy(mbox, pmbox,
13800                                                         MAILBOX_CMD_SIZE);
13801                                         if (pmb->out_ext_byte_len &&
13802                                                 pmb->ctx_buf)
13803                                                 lpfc_sli_pcimem_bcopy(
13804                                                 phba->mbox_ext,
13805                                                 pmb->ctx_buf,
13806                                                 pmb->out_ext_byte_len);
13807                                 }
13808                                 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13809                                         pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13810
13811                                         lpfc_debugfs_disc_trc(vport,
13812                                                 LPFC_DISC_TRC_MBOX_VPORT,
13813                                                 "MBOX dflt rpi: "
13814                                                 "status:x%x rpi:x%x",
13815                                                 (uint32_t)pmbox->mbxStatus,
13816                                                 pmbox->un.varWords[0], 0);
13817
13818                                         if (!pmbox->mbxStatus) {
13819                                                 mp = (struct lpfc_dmabuf *)
13820                                                         (pmb->ctx_buf);
13821                                                 ndlp = (struct lpfc_nodelist *)
13822                                                         pmb->ctx_ndlp;
13823
13824                                                 /* Reg_LOGIN of dflt RPI was
13825                                                  * successful. Now let's get
13826                                                  * rid of the RPI using the
13827                                                  * same mbox buffer.
13828                                                  */
13829                                                 lpfc_unreg_login(phba,
13830                                                         vport->vpi,
13831                                                         pmbox->un.varWords[0],
13832                                                         pmb);
13833                                                 pmb->mbox_cmpl =
13834                                                         lpfc_mbx_cmpl_dflt_rpi;
13835                                                 pmb->ctx_buf = mp;
13836                                                 pmb->ctx_ndlp = ndlp;
13837                                                 pmb->vport = vport;
13838                                                 rc = lpfc_sli_issue_mbox(phba,
13839                                                                 pmb,
13840                                                                 MBX_NOWAIT);
13841                                                 if (rc != MBX_BUSY)
13842                                                         lpfc_printf_log(phba,
13843                                                         KERN_ERR,
13844                                                         LOG_TRACE_EVENT,
13845                                                         "0350 rc should have "
13846                                                         "been MBX_BUSY\n");
13847                                                 if (rc != MBX_NOT_FINISHED)
13848                                                         goto send_current_mbox;
13849                                         }
13850                                 }
13851                                 spin_lock_irqsave(
13852                                                 &phba->pport->work_port_lock,
13853                                                 iflag);
13854                                 phba->pport->work_port_events &=
13855                                         ~WORKER_MBOX_TMO;
13856                                 spin_unlock_irqrestore(
13857                                                 &phba->pport->work_port_lock,
13858                                                 iflag);
13859
13860                                 /* Do NOT queue MBX_HEARTBEAT to the worker
13861                                  * thread for processing.
13862                                  */
13863                                 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13864                                         /* Process mbox now */
13865                                         phba->sli.mbox_active = NULL;
13866                                         phba->sli.sli_flag &=
13867                                                 ~LPFC_SLI_MBOX_ACTIVE;
13868                                         if (pmb->mbox_cmpl)
13869                                                 pmb->mbox_cmpl(phba, pmb);
13870                                 } else {
13871                                         /* Queue to worker thread to process */
13872                                         lpfc_mbox_cmpl_put(phba, pmb);
13873                                 }
13874                         }
13875                 } else
13876                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13877
13878                 if ((work_ha_copy & HA_MBATT) &&
13879                     (phba->sli.mbox_active == NULL)) {
13880 send_current_mbox:
13881                         /* Process next mailbox command if there is one */
13882                         do {
13883                                 rc = lpfc_sli_issue_mbox(phba, NULL,
13884                                                          MBX_NOWAIT);
13885                         } while (rc == MBX_NOT_FINISHED);
13886                         if (rc != MBX_SUCCESS)
13887                                 lpfc_printf_log(phba, KERN_ERR,
13888                                                 LOG_TRACE_EVENT,
13889                                                 "0349 rc should be "
13890                                                 "MBX_SUCCESS\n");
13891                 }
13892
13893                 spin_lock_irqsave(&phba->hbalock, iflag);
13894                 phba->work_ha |= work_ha_copy;
13895                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13896                 lpfc_worker_wake_up(phba);
13897         }
13898         return IRQ_HANDLED;
13899 unplug_error:
13900         spin_unlock_irqrestore(&phba->hbalock, iflag);
13901         return IRQ_HANDLED;
13902
13903 } /* lpfc_sli_sp_intr_handler */
13904
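/*
 * A deferred error attention (DEFER_ERATT) is declared above only when
 * HS_FFER1 is set together with at least one of HS_FFER2..HS_FFER8 in
 * phba->work_hs, i.e. (illustrative restatement, not driver code):
 *
 *	deferred = (work_hs & HS_FFER1) &&
 *		   (work_hs & (HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
 *			       HS_FFER6 | HS_FFER7 | HS_FFER8));
 *
 * When that holds, the HC register is cleared to mask all interrupt
 * sources until the worker thread recovers the port.
 */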
13905 /**
13906  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13907  * @irq: Interrupt number.
13908  * @dev_id: The device context pointer.
13909  *
13910  * This function is directly called from the PCI layer as an interrupt
13911  * service routine when device with SLI-3 interface spec is enabled with
13912  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13913  * ring event in the HBA. However, when the device is enabled with either
13914  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13915  * device-level interrupt handler. When the PCI slot is in error recovery
13916  * or the HBA is undergoing initialization, the interrupt handler will not
13917  * process the interrupt. The SCSI FCP fast-path ring events are handled in
13918  * the interrupt context. This function is called without any lock held.
13919  * It gets the hbalock to access and update SLI data structures.
13920  *
13921  * This function returns IRQ_HANDLED when interrupt is handled else it
13922  * returns IRQ_NONE.
13923  **/
13924 irqreturn_t
13925 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13926 {
13927         struct lpfc_hba  *phba;
13928         uint32_t ha_copy;
13929         unsigned long status;
13930         unsigned long iflag;
13931         struct lpfc_sli_ring *pring;
13932
13933         /* Get the driver's phba structure from the dev_id and
13934          * assume the HBA is not interrupting.
13935          */
13936         phba = (struct lpfc_hba *) dev_id;
13937
13938         if (unlikely(!phba))
13939                 return IRQ_NONE;
13940
13941         /*
13942          * Extra care is needed when this function is invoked as an
13943          * individual interrupt handler in MSI-X multi-message interrupt mode
13944          */
13945         if (phba->intr_type == MSIX) {
13946                 /* Check device state for handling interrupt */
13947                 if (lpfc_intr_state_check(phba))
13948                         return IRQ_NONE;
13949                 /* Need to read HA REG for FCP ring and other ring events */
13950                 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13951                         return IRQ_HANDLED;
13952                 /* Clear up only attention source related to fast-path */
13953                 spin_lock_irqsave(&phba->hbalock, iflag);
13954                 /*
13955                  * If there is deferred error attention, do not check for
13956                  * any interrupt.
13957                  */
13958                 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13959                         spin_unlock_irqrestore(&phba->hbalock, iflag);
13960                         return IRQ_NONE;
13961                 }
13962                 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13963                         phba->HAregaddr);
13964                 readl(phba->HAregaddr); /* flush */
13965                 spin_unlock_irqrestore(&phba->hbalock, iflag);
13966         } else
13967                 ha_copy = phba->ha_copy;
13968
13969         /*
13970          * Process all events on FCP ring. Take the optimized path for FCP IO.
13971          */
13972         ha_copy &= ~(phba->work_ha_mask);
13973
13974         status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13975         status >>= (4*LPFC_FCP_RING);
13976         pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13977         if (status & HA_RXMASK)
13978                 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13979
13980         if (phba->cfg_multi_ring_support == 2) {
13981                 /*
13982                  * Process all events on extra ring. Take the optimized path
13983                  * for extra ring IO.
13984                  */
13985                 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13986                 status >>= (4*LPFC_EXTRA_RING);
13987                 if (status & HA_RXMASK) {
13988                         lpfc_sli_handle_fast_ring_event(phba,
13989                                         &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13990                                         status);
13991                 }
13992         }
13993         return IRQ_HANDLED;
13994 }  /* lpfc_sli_fp_intr_handler */
13995
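/*
 * The Host Attention register packs one 4-bit attention nibble per ring,
 * so ring N's events sit at bits [4N+3:4N].  The extract-and-test pattern
 * above could be factored as the hypothetical helper below (an
 * illustrative sketch, not part of this driver):
 *
 *	static inline unsigned long
 *	lpfc_ha_ring_status(uint32_t ha_copy, int ring)
 *	{
 *		return (ha_copy >> (4 * ring)) & HA_RXMASK;
 *	}
 *
 *	status = lpfc_ha_ring_status(ha_copy, LPFC_FCP_RING);
 *	if (status)
 *		lpfc_sli_handle_fast_ring_event(phba, pring, status);
 */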
13996 /**
13997  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
13998  * @irq: Interrupt number.
13999  * @dev_id: The device context pointer.
14000  *
14001  * This function is the HBA device-level interrupt handler to device with
14002  * SLI-3 interface spec, called from the PCI layer when either MSI or
14003  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14004  * requires driver attention. This function invokes the slow-path interrupt
14005  * attention handling function and fast-path interrupt attention handling
14006  * function in turn to process the relevant HBA attention events. This
14007  * function is called without any lock held. It gets the hbalock to access
14008  * and update SLI data structures.
14009  *
14010  * This function returns IRQ_HANDLED when interrupt is handled, else it
14011  * returns IRQ_NONE.
14012  **/
14013 irqreturn_t
14014 lpfc_sli_intr_handler(int irq, void *dev_id)
14015 {
14016         struct lpfc_hba  *phba;
14017         irqreturn_t sp_irq_rc, fp_irq_rc;
14018         unsigned long status1, status2;
14019         uint32_t hc_copy;
14020
14021         /*
14022          * Get the driver's phba structure from the dev_id and
14023          * assume the HBA is not interrupting.
14024          */
14025         phba = (struct lpfc_hba *) dev_id;
14026
14027         if (unlikely(!phba))
14028                 return IRQ_NONE;
14029
14030         /* Check device state for handling interrupt */
14031         if (lpfc_intr_state_check(phba))
14032                 return IRQ_NONE;
14033
14034         spin_lock(&phba->hbalock);
14035         if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14036                 spin_unlock(&phba->hbalock);
14037                 return IRQ_HANDLED;
14038         }
14039
14040         if (unlikely(!phba->ha_copy)) {
14041                 spin_unlock(&phba->hbalock);
14042                 return IRQ_NONE;
14043         } else if (phba->ha_copy & HA_ERATT) {
14044                 if (phba->hba_flag & HBA_ERATT_HANDLED)
14045                         /* ERATT polling has handled ERATT */
14046                         phba->ha_copy &= ~HA_ERATT;
14047                 else
14048                         /* Indicate interrupt handler handles ERATT */
14049                         phba->hba_flag |= HBA_ERATT_HANDLED;
14050         }
14051
14052         /*
14053          * If there is deferred error attention, do not check for any interrupt.
14054          */
14055         if (unlikely(phba->hba_flag & DEFER_ERATT)) {
14056                 spin_unlock(&phba->hbalock);
14057                 return IRQ_NONE;
14058         }
14059
14060         /* Clear attention sources except link and error attentions */
14061         if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14062                 spin_unlock(&phba->hbalock);
14063                 return IRQ_HANDLED;
14064         }
14065         writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14066                 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14067                 phba->HCregaddr);
14068         writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14069         writel(hc_copy, phba->HCregaddr);
14070         readl(phba->HAregaddr); /* flush */
14071         spin_unlock(&phba->hbalock);
14072
14073         /*
14074          * Invoke slow-path host attention interrupt handling as appropriate.
14075          */
14076
14077         /* status of events with mailbox and link attention */
14078         status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14079
14080         /* status of events with ELS ring */
14081         status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
14082         status2 >>= (4*LPFC_ELS_RING);
14083
14084         if (status1 || (status2 & HA_RXMASK))
14085                 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14086         else
14087                 sp_irq_rc = IRQ_NONE;
14088
14089         /*
14090          * Invoke fast-path host attention interrupt handling as appropriate.
14091          */
14092
14093         /* status of events with FCP ring */
14094         status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14095         status1 >>= (4*LPFC_FCP_RING);
14096
14097         /* status of events with extra ring */
14098         if (phba->cfg_multi_ring_support == 2) {
14099                 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14100                 status2 >>= (4*LPFC_EXTRA_RING);
14101         } else
14102                 status2 = 0;
14103
14104         if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14105                 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14106         else
14107                 fp_irq_rc = IRQ_NONE;
14108
14109         /* Return device-level interrupt handling status */
14110         return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14111 }  /* lpfc_sli_intr_handler */
14112
14113 /**
14114  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14115  * @phba: pointer to lpfc hba data structure.
14116  *
14117  * This routine is invoked by the worker thread to process all the pending
14118  * SLI4 els abort xri events.
14119  **/
14120 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14121 {
14122         struct lpfc_cq_event *cq_event;
14123         unsigned long iflags;
14124
14125         /* First, declare the els xri abort event has been handled */
14126         spin_lock_irqsave(&phba->hbalock, iflags);
14127         phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
14128         spin_unlock_irqrestore(&phba->hbalock, iflags);
14129
14130         /* Now, handle all the els xri abort events */
14131         spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14132         while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14133                 /* Get the first event from the head of the event queue */
14134                 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14135                                  cq_event, struct lpfc_cq_event, list);
14136                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14137                                        iflags);
14138                 /* Notify aborted XRI for ELS work queue */
14139                 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14140
14141                 /* Free the event processed back to the free pool */
14142                 lpfc_sli4_cq_event_release(phba, cq_event);
14143                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14144                                   iflags);
14145         }
14146         spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14147 }
14148
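/*
 * The drain loop above follows the usual "lock, detach head, unlock,
 * process, relock" shape so the list lock is never held across the
 * handler.  In outline (illustrative sketch with generic names):
 *
 *	spin_lock_irqsave(&list_lock, iflags);
 *	while (!list_empty(&work_queue)) {
 *		list_remove_head(&work_queue, evt, struct lpfc_cq_event,
 *				 list);
 *		spin_unlock_irqrestore(&list_lock, iflags);
 *		handle(evt);
 *		spin_lock_irqsave(&list_lock, iflags);
 *	}
 *	spin_unlock_irqrestore(&list_lock, iflags);
 */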
14149 /**
14150  * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14151  * @phba: Pointer to HBA context object.
14152  * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
14153  *
14154  * This routine handles an ELS work-queue completion event and constructs
14155  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14156  * discovery engine to handle.
14157  *
14158  * Return: Pointer to the receive IOCBQ, NULL otherwise.
14159  **/
14160 static struct lpfc_iocbq *
14161 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14162                                   struct lpfc_iocbq *irspiocbq)
14163 {
14164         struct lpfc_sli_ring *pring;
14165         struct lpfc_iocbq *cmdiocbq;
14166         struct lpfc_wcqe_complete *wcqe;
14167         unsigned long iflags;
14168
14169         pring = lpfc_phba_elsring(phba);
14170         if (unlikely(!pring))
14171                 return NULL;
14172
14173         wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14174         spin_lock_irqsave(&pring->ring_lock, iflags);
14175         pring->stats.iocb_event++;
14176         /* Look up the ELS command IOCB and create pseudo response IOCB */
14177         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14178                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14179         if (unlikely(!cmdiocbq)) {
14180                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14181                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14182                                 "0386 ELS complete with no corresponding "
14183                                 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14184                                 wcqe->word0, wcqe->total_data_placed,
14185                                 wcqe->parameter, wcqe->word3);
14186                 lpfc_sli_release_iocbq(phba, irspiocbq);
14187                 return NULL;
14188         }
14189
14190         memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14191         memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14192
14193         /* Put the iocb back on the txcmplq */
14194         lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14195         spin_unlock_irqrestore(&pring->ring_lock, iflags);
14196
14197         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14198                 spin_lock_irqsave(&phba->hbalock, iflags);
14199                 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14200                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14201         }
14202
14203         return irspiocbq;
14204 }
14205
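/**
 * lpfc_cq_event_setup - Allocate a CQ event and copy a CQE into it
 * @phba: Pointer to HBA context object.
 * @entry: Pointer to the source completion queue entry.
 * @size: Number of bytes to copy from @entry.
 *
 * This routine allocates an internal CQ_EVENT entry and copies @size bytes
 * of the hardware CQE into it for deferred processing.
 *
 * Return: Pointer to the new CQ event, or NULL on allocation failure.
 **/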
14206 inline struct lpfc_cq_event *
14207 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14208 {
14209         struct lpfc_cq_event *cq_event;
14210
14211         /* Allocate a new internal CQ_EVENT entry */
14212         cq_event = lpfc_sli4_cq_event_alloc(phba);
14213         if (!cq_event) {
14214                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14215                                 "0602 Failed to alloc CQ_EVENT entry\n");
14216                 return NULL;
14217         }
14218
14219         /* Move the CQE into the event */
14220         memcpy(&cq_event->cqe, entry, size);
14221         return cq_event;
14222 }
14223
14224 /**
14225  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14226  * @phba: Pointer to HBA context object.
14227  * @mcqe: Pointer to mailbox completion queue entry.
14228  *
14229  * This routine processes a mailbox completion queue entry that carries an
14230  * asynchronous event.
14231  *
14232  * Return: true if work posted to worker thread, otherwise false.
14233  **/
14234 static bool
14235 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14236 {
14237         struct lpfc_cq_event *cq_event;
14238         unsigned long iflags;
14239
14240         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14241                         "0392 Async Event: word0:x%x, word1:x%x, "
14242                         "word2:x%x, word3:x%x\n", mcqe->word0,
14243                         mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14244
14245         cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14246         if (!cq_event)
14247                 return false;
14248
14249         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14250         list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14251         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14252
14253         /* Set the async event flag */
14254         spin_lock_irqsave(&phba->hbalock, iflags);
14255         phba->hba_flag |= ASYNC_EVENT;
14256         spin_unlock_irqrestore(&phba->hbalock, iflags);
14257
14258         return true;
14259 }
14260
14261 /**
14262  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14263  * @phba: Pointer to HBA context object.
14264  * @mcqe: Pointer to mailbox completion queue entry.
14265  *
14266  * This routine processes a mailbox completion queue entry that carries a
14267  * mailbox completion event.
14268  *
14269  * Return: true if work posted to worker thread, otherwise false.
14270  **/
14271 static bool
14272 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14273 {
14274         uint32_t mcqe_status;
14275         MAILBOX_t *mbox, *pmbox;
14276         struct lpfc_mqe *mqe;
14277         struct lpfc_vport *vport;
14278         struct lpfc_nodelist *ndlp;
14279         struct lpfc_dmabuf *mp;
14280         unsigned long iflags;
14281         LPFC_MBOXQ_t *pmb;
14282         bool workposted = false;
14283         int rc;
14284
14285         /* Not a mailbox-complete MCQE; exit after handling mailbox consumption */
14286         if (!bf_get(lpfc_trailer_completed, mcqe))
14287                 goto out_no_mqe_complete;
14288
14289         /* Get the reference to the active mbox command */
14290         spin_lock_irqsave(&phba->hbalock, iflags);
14291         pmb = phba->sli.mbox_active;
14292         if (unlikely(!pmb)) {
14293                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14294                                 "1832 No pending MBOX command to handle\n");
14295                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14296                 goto out_no_mqe_complete;
14297         }
14298         spin_unlock_irqrestore(&phba->hbalock, iflags);
14299         mqe = &pmb->u.mqe;
14300         pmbox = (MAILBOX_t *)&pmb->u.mqe;
14301         mbox = phba->mbox;
14302         vport = pmb->vport;
14303
14304         /* Reset heartbeat timer */
14305         phba->last_completion_time = jiffies;
14306         del_timer(&phba->sli.mbox_tmo);
14307
14308         /* Move mbox data to caller's mailbox region, do endian swapping */
14309         if (pmb->mbox_cmpl && mbox)
14310                 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14311
14312         /*
14313          * For mcqe errors, conditionally move a modified error code to
14314          * the mbox so that the error will not be missed.
14315          */
14316         mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14317         if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14318                 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14319                         bf_set(lpfc_mqe_status, mqe,
14320                                (LPFC_MBX_ERROR_RANGE | mcqe_status));
14321         }
14322         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14323                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14324                 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14325                                       "MBOX dflt rpi: status:x%x rpi:x%x",
14326                                       mcqe_status,
14327                                       pmbox->un.varWords[0], 0);
14328                 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14329                         mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14330                         ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14331
14332                         /* Reg_LOGIN of dflt RPI was successful. Mark the
14333                          * node as having an UNREG_LOGIN in progress to stop
14334                          * an unsolicited PLOGI from the same NPortId from
14335                          * starting another mailbox transaction.
14336                          */
14337                         spin_lock_irqsave(&ndlp->lock, iflags);
14338                         ndlp->nlp_flag |= NLP_UNREG_INP;
14339                         spin_unlock_irqrestore(&ndlp->lock, iflags);
14340                         lpfc_unreg_login(phba, vport->vpi,
14341                                          pmbox->un.varWords[0], pmb);
14342                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14343                         pmb->ctx_buf = mp;
14344
14345                         /* No reference taken here.  This is a default
14346                          * RPI reg/immediate unreg cycle. The reference was
14347                          * taken in the reg rpi path and is released when
14348                          * this mailbox completes.
14349                          */
14350                         pmb->ctx_ndlp = ndlp;
14351                         pmb->vport = vport;
14352                         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14353                         if (rc != MBX_BUSY)
14354                                 lpfc_printf_log(phba, KERN_ERR,
14355                                                 LOG_TRACE_EVENT,
14356                                                 "0385 rc should "
14357                                                 "have been MBX_BUSY\n");
14358                         if (rc != MBX_NOT_FINISHED)
14359                                 goto send_current_mbox;
14360                 }
14361         }
14362         spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14363         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14364         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14365
14366         /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14367         if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14368                 spin_lock_irqsave(&phba->hbalock, iflags);
14369                 /* Release the mailbox command posting token */
14370                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14371                 phba->sli.mbox_active = NULL;
14372                 if (bf_get(lpfc_trailer_consumed, mcqe))
14373                         lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14374                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14375
14376                 /* Post the next mbox command, if there is one */
14377                 lpfc_sli4_post_async_mbox(phba);
14378
14379                 /* Process cmpl now */
14380                 if (pmb->mbox_cmpl)
14381                         pmb->mbox_cmpl(phba, pmb);
14382                 return false;
14383         }
14384
14385         /* There is mailbox completion work to queue to the worker thread */
14386         spin_lock_irqsave(&phba->hbalock, iflags);
14387         __lpfc_mbox_cmpl_put(phba, pmb);
14388         phba->work_ha |= HA_MBATT;
14389         spin_unlock_irqrestore(&phba->hbalock, iflags);
14390         workposted = true;
14391
14392 send_current_mbox:
14393         spin_lock_irqsave(&phba->hbalock, iflags);
14394         /* Release the mailbox command posting token */
14395         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14396         /* Clearing the active mailbox pointer must be in sync with the flag clear */
14397         phba->sli.mbox_active = NULL;
14398         if (bf_get(lpfc_trailer_consumed, mcqe))
14399                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14400         spin_unlock_irqrestore(&phba->hbalock, iflags);
14401         /* Wake up worker thread to post the next pending mailbox command */
14402         lpfc_worker_wake_up(phba);
14403         return workposted;
14404
14405 out_no_mqe_complete:
14406         spin_lock_irqsave(&phba->hbalock, iflags);
14407         if (bf_get(lpfc_trailer_consumed, mcqe))
14408                 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14409         spin_unlock_irqrestore(&phba->hbalock, iflags);
14410         return false;
14411 }
14412
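/*
 * The status folding above keeps completion handlers from missing an
 * error reported only in the MCQE: when the MCQE status is bad but the
 * MQE status still reads MBX_SUCCESS, the MCQE status is merged into the
 * MQE status word (illustrative restatement of the code above):
 *
 *	if (mcqe_status != MB_CQE_STATUS_SUCCESS &&
 *	    bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
 *		bf_set(lpfc_mqe_status, mqe,
 *		       LPFC_MBX_ERROR_RANGE | mcqe_status);
 */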
14413 /**
14414  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14415  * @phba: Pointer to HBA context object.
14416  * @cq: Pointer to associated CQ
14417  * @cqe: Pointer to mailbox completion queue entry.
14418  *
14419  * This routine processes a mailbox completion queue entry; it invokes the
14420  * proper mailbox completion handling or asynchronous event handling routine
14421  * according to the MCQE's async bit.
14422  *
14423  * Return: true if work posted to worker thread, otherwise false.
14424  **/
14425 static bool
14426 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14427                          struct lpfc_cqe *cqe)
14428 {
14429         struct lpfc_mcqe mcqe;
14430         bool workposted;
14431
14432         cq->CQ_mbox++;
14433
14434         /* Copy the mailbox MCQE and convert endian order as needed */
14435         lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14436
14437         /* Invoke the proper event handling routine */
14438         if (!bf_get(lpfc_trailer_async, &mcqe))
14439                 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14440         else
14441                 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14442         return workposted;
14443 }
14444
14445 /**
14446  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14447  * @phba: Pointer to HBA context object.
14448  * @cq: Pointer to associated CQ
14449  * @wcqe: Pointer to work-queue completion queue entry.
14450  *
14451  * This routine handles an ELS work-queue completion event.
14452  *
14453  * Return: true if work posted to worker thread, otherwise false.
14454  **/
14455 static bool
14456 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14457                              struct lpfc_wcqe_complete *wcqe)
14458 {
14459         struct lpfc_iocbq *irspiocbq;
14460         unsigned long iflags;
14461         struct lpfc_sli_ring *pring = cq->pring;
14462         int txq_cnt = 0;
14463         int txcmplq_cnt = 0;
14464
14465         /* Check for response status */
14466         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14467                 /* Log the error status */
14468                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14469                                 "0357 ELS CQE error: status=x%x: "
14470                                 "CQE: %08x %08x %08x %08x\n",
14471                                 bf_get(lpfc_wcqe_c_status, wcqe),
14472                                 wcqe->word0, wcqe->total_data_placed,
14473                                 wcqe->parameter, wcqe->word3);
14474         }
14475
14476         /* Get an irspiocbq for later ELS response processing use */
14477         irspiocbq = lpfc_sli_get_iocbq(phba);
14478         if (!irspiocbq) {
14479                 if (!list_empty(&pring->txq))
14480                         txq_cnt++;
14481                 if (!list_empty(&pring->txcmplq))
14482                         txcmplq_cnt++;
14483                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14484                         "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14485                         "els_txcmplq_cnt=%d\n",
14486                         txq_cnt, phba->iocb_cnt,
14487                         txcmplq_cnt);
14488                 return false;
14489         }
14490
14491         /* Save off the slow-path queue event for work thread to process */
14492         memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14493         spin_lock_irqsave(&phba->hbalock, iflags);
14494         list_add_tail(&irspiocbq->cq_event.list,
14495                       &phba->sli4_hba.sp_queue_event);
14496         phba->hba_flag |= HBA_SP_QUEUE_EVT;
14497         spin_unlock_irqrestore(&phba->hbalock, iflags);
14498
14499         return true;
14500 }
14501
14502 /**
14503  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14504  * @phba: Pointer to HBA context object.
14505  * @wcqe: Pointer to work-queue completion queue entry.
14506  *
14507  * This routine handles slow-path WQ entry consumed event by invoking the
14508  * proper WQ release routine to the slow-path WQ.
14509  **/
14510 static void
14511 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14512                              struct lpfc_wcqe_release *wcqe)
14513 {
14514         /* sanity check on queue memory */
14515         if (unlikely(!phba->sli4_hba.els_wq))
14516                 return;
14517         /* Check for the slow-path ELS work queue */
14518         if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14519                 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14520                                      bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14521         else
14522                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14523                                 "2579 Slow-path wqe consume event carries "
14524                                 "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14525                                 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14526                                 phba->sli4_hba.els_wq->queue_id);
14527 }
14528
14529 /**
14530  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14531  * @phba: Pointer to HBA context object.
14532  * @cq: Pointer to a WQ completion queue.
14533  * @wcqe: Pointer to work-queue completion queue entry.
14534  *
14535  * This routine handles an XRI abort event.
14536  *
14537  * Return: true if work posted to worker thread, otherwise false.
14538  **/
14539 static bool
14540 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14541                                    struct lpfc_queue *cq,
14542                                    struct sli4_wcqe_xri_aborted *wcqe)
14543 {
14544         bool workposted = false;
14545         struct lpfc_cq_event *cq_event;
14546         unsigned long iflags;
14547
14548         switch (cq->subtype) {
14549         case LPFC_IO:
14550                 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14551                 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14552                         /* Notify aborted XRI for NVME work queue */
14553                         if (phba->nvmet_support)
14554                                 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14555                 }
14556                 workposted = false;
14557                 break;
14558         case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14559         case LPFC_ELS:
14560                 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14561                 if (!cq_event) {
14562                         workposted = false;
14563                         break;
14564                 }
14565                 cq_event->hdwq = cq->hdwq;
14566                 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14567                                   iflags);
14568                 list_add_tail(&cq_event->list,
14569                               &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14570                 /* Set the els xri abort event flag */
14571                 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14572                 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14573                                        iflags);
14574                 workposted = true;
14575                 break;
14576         default:
14577                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14578                                 "0603 Invalid CQ subtype %d: "
14579                                 "%08x %08x %08x %08x\n",
14580                                 cq->subtype, wcqe->word0, wcqe->parameter,
14581                                 wcqe->word2, wcqe->word3);
14582                 workposted = false;
14583                 break;
14584         }
14585         return workposted;
14586 }
14587
14588 #define FC_RCTL_MDS_DIAGS       0xF4
14589
14590 /**
14591  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14592  * @phba: Pointer to HBA context object.
14593  * @rcqe: Pointer to receive-queue completion queue entry.
14594  *
14595  * This routine processes a receive-queue completion queue entry.
14596  *
14597  * Return: true if work posted to worker thread, otherwise false.
14598  **/
14599 static bool
14600 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14601 {
14602         bool workposted = false;
14603         struct fc_frame_header *fc_hdr;
14604         struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14605         struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14606         struct lpfc_nvmet_tgtport *tgtp;
14607         struct hbq_dmabuf *dma_buf;
14608         uint32_t status, rq_id;
14609         unsigned long iflags;
14610
14611         /* sanity check on queue memory */
14612         if (unlikely(!hrq) || unlikely(!drq))
14613                 return workposted;
14614
14615         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14616                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14617         else
14618                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14619         if (rq_id != hrq->queue_id)
14620                 goto out;
14621
14622         status = bf_get(lpfc_rcqe_status, rcqe);
14623         switch (status) {
14624         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14625                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14626                                 "2537 Receive Frame Truncated!!\n");
14627                 fallthrough;
14628         case FC_STATUS_RQ_SUCCESS:
14629                 spin_lock_irqsave(&phba->hbalock, iflags);
14630                 lpfc_sli4_rq_release(hrq, drq);
14631                 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14632                 if (!dma_buf) {
14633                         hrq->RQ_no_buf_found++;
14634                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14635                         goto out;
14636                 }
14637                 hrq->RQ_rcv_buf++;
14638                 hrq->RQ_buf_posted--;
14639                 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14640
14641                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14642
14643                 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14644                     fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14645                         spin_unlock_irqrestore(&phba->hbalock, iflags);
14646                         /* Handle MDS Loopback frames */
14647                         if (!(phba->pport->load_flag & FC_UNLOADING))
14648                                 lpfc_sli4_handle_mds_loopback(phba->pport,
14649                                                               dma_buf);
14650                         else
14651                                 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14652                         break;
14653                 }
14654
14655                 /* save off the frame for the work thread to process */
14656                 list_add_tail(&dma_buf->cq_event.list,
14657                               &phba->sli4_hba.sp_queue_event);
14658                 /* Frame received */
14659                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14660                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14661                 workposted = true;
14662                 break;
14663         case FC_STATUS_INSUFF_BUF_FRM_DISC:
14664                 if (phba->nvmet_support) {
14665                         tgtp = phba->targetport->private;
14666                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14667                                         "6402 RQE Error x%x, posted %d err_cnt "
14668                                         "%d: %x %x %x\n",
14669                                         status, hrq->RQ_buf_posted,
14670                                         hrq->RQ_no_posted_buf,
14671                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
14672                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
14673                                         atomic_read(&tgtp->xmt_fcp_release));
14674                 }
14675                 fallthrough;
14676
14677         case FC_STATUS_INSUFF_BUF_NEED_BUF:
14678                 hrq->RQ_no_posted_buf++;
14679                 /* Post more buffers if possible */
14680                 spin_lock_irqsave(&phba->hbalock, iflags);
14681                 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14682                 spin_unlock_irqrestore(&phba->hbalock, iflags);
14683                 workposted = true;
14684                 break;
14685         }
14686 out:
14687         return workposted;
14688 }
14689
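/*
 * The header RQ (hrq) and data RQ (drq) are consumed in lockstep; each
 * received frame uses one entry from each, which is why
 * lpfc_sli4_rq_release(hrq, drq) advances both queues together before the
 * DMA buffer backing those entries is looked up.
 */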
14690 /**
14691  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14692  * @phba: Pointer to HBA context object.
14693  * @cq: Pointer to the completion queue.
14694  * @cqe: Pointer to a completion queue entry.
14695  *
14696  * This routine processes a slow-path work-queue or receive-queue
14697  * completion queue entry.
14698  *
14699  * Return: true if work posted to worker thread, otherwise false.
14700  **/
14701 static bool
14702 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14703                          struct lpfc_cqe *cqe)
14704 {
14705         struct lpfc_cqe cqevt;
14706         bool workposted = false;
14707
14708         /* Copy the work queue CQE and convert endian order if needed */
14709         lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14710
14711         /* Check and process for different type of WCQE and dispatch */
14712         switch (bf_get(lpfc_cqe_code, &cqevt)) {
14713         case CQE_CODE_COMPL_WQE:
14714                 /* Process the WQ/RQ complete event */
14715                 phba->last_completion_time = jiffies;
14716                 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14717                                 (struct lpfc_wcqe_complete *)&cqevt);
14718                 break;
14719         case CQE_CODE_RELEASE_WQE:
14720                 /* Process the WQ release event */
14721                 lpfc_sli4_sp_handle_rel_wcqe(phba,
14722                                 (struct lpfc_wcqe_release *)&cqevt);
14723                 break;
14724         case CQE_CODE_XRI_ABORTED:
14725                 /* Process the WQ XRI abort event */
14726                 phba->last_completion_time = jiffies;
14727                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14728                                 (struct sli4_wcqe_xri_aborted *)&cqevt);
14729                 break;
14730         case CQE_CODE_RECEIVE:
14731         case CQE_CODE_RECEIVE_V1:
14732                 /* Process the RQ event */
14733                 phba->last_completion_time = jiffies;
14734                 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14735                                 (struct lpfc_rcqe *)&cqevt);
14736                 break;
14737         default:
14738                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14739                                 "0388 Not a valid WCQE code: x%x\n",
14740                                 bf_get(lpfc_cqe_code, &cqevt));
14741                 break;
14742         }
14743         return workposted;
14744 }
14745
14746 /**
14747  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14748  * @phba: Pointer to HBA context object.
14749  * @eqe: Pointer to fast-path event queue entry.
14750  * @speq: Pointer to slow-path event queue.
14751  *
14752  * This routine processes an event queue entry from the slow-path event
14753  * queue. It checks the MajorCode and MinorCode to determine whether this
14754  * is a completion event on a completion queue; if not, an error is logged
14755  * and the routine returns. Otherwise, it finds the corresponding
14756  * completion queue, processes all the entries on that completion queue,
14757  * rearms the completion queue, and then returns.
14758  *
14759  **/
14760 static void
14761 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14762         struct lpfc_queue *speq)
14763 {
14764         struct lpfc_queue *cq = NULL, *childq;
14765         uint16_t cqid;
14766         int ret = 0;
14767
14768         /* Get the reference to the corresponding CQ */
14769         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14770
14771         list_for_each_entry(childq, &speq->child_list, list) {
14772                 if (childq->queue_id == cqid) {
14773                         cq = childq;
14774                         break;
14775                 }
14776         }
14777         if (unlikely(!cq)) {
14778                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14779                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14780                                         "0365 Slow-path CQ identifier "
14781                                         "(%d) does not exist\n", cqid);
14782                 return;
14783         }
14784
14785         /* Save EQ associated with this CQ */
14786         cq->assoc_qp = speq;
14787
14788         if (is_kdump_kernel())
14789                 ret = queue_work(phba->wq, &cq->spwork);
14790         else
14791                 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14792
14793         if (!ret)
14794                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14795                                 "0390 Cannot schedule queue work "
14796                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14797                                 cqid, cq->queue_id, raw_smp_processor_id());
14798 }
14799
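/*
 * A kdump (crash-capture) kernel typically boots with a single CPU
 * online, so the CQ work is queued without a CPU hint there; otherwise it
 * is pinned to the CQ's assigned channel for cache locality.
 */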
14800 /**
14801  * __lpfc_sli4_process_cq - Process elements of a CQ
14802  * @phba: Pointer to HBA context object.
14803  * @cq: Pointer to CQ to be processed
14804  * @handler: Routine to process each cqe
14805  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14806  * @poll_mode: Polling mode we were called from
14807  *
14808  * This routine processes completion queue entries in a CQ. While a valid
14809  * queue element is found, the handler is called. During processing checks
14810  * are made for periodic doorbell writes to let the hardware know of
14811  * element consumption.
14812  *
14813  * If the max limit on cqes to process is hit, or there are no more valid
14814  * entries, the loop stops. If we processed a sufficient number of elements,
14815  * meaning there is sufficient load, rather than rearming and generating
14816  * another interrupt, a cq rescheduling delay will be set. A delay of 0
14817  * indicates no rescheduling.
14818  *
14819  * Returns true if work was scheduled, false otherwise.
14820  **/
14821 static bool
14822 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14823         bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14824                         struct lpfc_cqe *), unsigned long *delay,
14825                         enum lpfc_poll_mode poll_mode)
14826 {
14827         struct lpfc_cqe *cqe;
14828         bool workposted = false;
14829         int count = 0, consumed = 0;
14830         bool arm = true;
14831
14832         /* default - no reschedule */
14833         *delay = 0;
14834
14835         if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14836                 goto rearm_and_exit;
14837
14838         /* Process all the entries to the CQ */
14839         cq->q_flag = 0;
14840         cqe = lpfc_sli4_cq_get(cq);
14841         while (cqe) {
14842                 workposted |= handler(phba, cq, cqe);
14843                 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14844
14845                 consumed++;
14846                 if (!(++count % cq->max_proc_limit))
14847                         break;
14848
14849                 if (!(count % cq->notify_interval)) {
14850                         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14851                                                 LPFC_QUEUE_NOARM);
14852                         consumed = 0;
14853                         cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14854                 }
14855
14856                 if (count == LPFC_NVMET_CQ_NOTIFY)
14857                         cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14858
14859                 cqe = lpfc_sli4_cq_get(cq);
14860         }
14861         if (count >= phba->cfg_cq_poll_threshold) {
14862                 *delay = 1;
14863                 arm = false;
14864         }
14865
14866         /* Note: complete the irq_poll softirq before rearming CQ */
14867         if (poll_mode == LPFC_IRQ_POLL)
14868                 irq_poll_complete(&cq->iop);
14869
14870         /* Track the max number of CQEs processed in 1 EQ */
14871         if (count > cq->CQ_max_cqe)
14872                 cq->CQ_max_cqe = count;
14873
14874         cq->assoc_qp->EQ_cqe_cnt += count;
14875
14876         /* Catch the no cq entry condition */
14877         if (unlikely(count == 0))
14878                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14879                                 "0369 No entry from completion queue "
14880                                 "qid=%d\n", cq->queue_id);
14881
14882         xchg(&cq->queue_claimed, 0);
14883
14884 rearm_and_exit:
14885         phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14886                         arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14887
14888         return workposted;
14889 }
14890
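/*
 * CQE consumption above is batched: every cq->notify_interval entries the
 * consumed count is flushed to hardware with a no-arm doorbell write, and
 * only the final write may rearm the CQ.  Rearming is skipped (*delay is
 * set to 1) once cfg_cq_poll_threshold entries have been processed, so
 * the handler is rescheduled instead of taking another interrupt.
 * Schematically (write_cq_db stands in for the sli4_write_cq_db callback;
 * this is a sketch, not driver code):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handler(phba, cq, cqe);
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		consumed++;
 *		if (!(++count % cq->notify_interval)) {
 *			write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM);
 *			consumed = 0;
 *		}
 *	}
 *	write_cq_db(phba, cq, consumed,
 *		    arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
 */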
14891 /**
14892  * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14893  * @cq: pointer to CQ to process
14894  *
14895  * This routine calls the cq processing routine with a handler specific
14896  * to the type of queue bound to it.
14897  *
14898  * The CQ routine returns two values: the first is the calling status,
14899  * which indicates whether work was queued to the background discovery
14900  * thread. If true, the routine should wake up the discovery thread;
14901  * the second is the delay parameter. If non-zero, rather than rearming
14902  * the CQ and taking yet another interrupt, the CQ handler should be
14903  * queued so that it is processed in a subsequent polling action. The
14904  * value of the delay indicates when to reschedule it.
14905  **/
14906 static void
14907 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14908 {
14909         struct lpfc_hba *phba = cq->phba;
14910         unsigned long delay;
14911         bool workposted = false;
14912         int ret = 0;
14913
14914         /* Process and rearm the CQ */
14915         switch (cq->type) {
14916         case LPFC_MCQ:
14917                 workposted |= __lpfc_sli4_process_cq(phba, cq,
14918                                                 lpfc_sli4_sp_handle_mcqe,
14919                                                 &delay, LPFC_QUEUE_WORK);
14920                 break;
14921         case LPFC_WCQ:
14922                 if (cq->subtype == LPFC_IO)
14923                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14924                                                 lpfc_sli4_fp_handle_cqe,
14925                                                 &delay, LPFC_QUEUE_WORK);
14926                 else
14927                         workposted |= __lpfc_sli4_process_cq(phba, cq,
14928                                                 lpfc_sli4_sp_handle_cqe,
14929                                                 &delay, LPFC_QUEUE_WORK);
14930                 break;
14931         default:
14932                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14933                                 "0370 Invalid completion queue type (%d)\n",
14934                                 cq->type);
14935                 return;
14936         }
14937
14938         if (delay) {
14939                 if (is_kdump_kernel())
14940                         ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14941                                                 delay);
14942                 else
14943                         ret = queue_delayed_work_on(cq->chann, phba->wq,
14944                                                 &cq->sched_spwork, delay);
14945                 if (!ret)
14946                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14947                                 "0394 Cannot schedule queue work "
14948                                 "for cqid=%d on CPU %d\n",
14949                                 cq->queue_id, cq->chann);
14950         }
14951
14952         /* wake up worker thread if there are works to be done */
14953         if (workposted)
14954                 lpfc_worker_wake_up(phba);
14955 }
14956
14957 /**
14958  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14959  *   interrupt
14960  * @work: pointer to work element
14961  *
14962  * Translates from the work handler and calls the slow-path handler.
14963  **/
14964 static void
14965 lpfc_sli4_sp_process_cq(struct work_struct *work)
14966 {
14967         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14968
14969         __lpfc_sli4_sp_process_cq(cq);
14970 }
14971
14972 /**
14973  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14974  * @work: pointer to work element
14975  *
14976  * Translates from the work handler and calls the slow-path handler.
14977  **/
14978 static void
14979 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14980 {
14981         struct lpfc_queue *cq = container_of(to_delayed_work(work),
14982                                         struct lpfc_queue, sched_spwork);
14983
14984         __lpfc_sli4_sp_process_cq(cq);
14985 }
14986
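/*
 * A delayed work item embeds a plain work_struct, so the timer-started
 * variant above must first recover the delayed_work with
 * to_delayed_work(work) before container_of() can locate the owning
 * struct lpfc_queue; the interrupt-started variant applies container_of()
 * to the work_struct directly.
 */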
14987 /**
14988  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
14989  * @phba: Pointer to HBA context object.
14990  * @cq: Pointer to associated CQ
14991  * @wcqe: Pointer to work-queue completion queue entry.
14992  *
14993  * This routine processes a fast-path work queue completion entry from a
14994  * fast-path event queue for FCP command response completion.
14995  **/
14996 static void
14997 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14998                              struct lpfc_wcqe_complete *wcqe)
14999 {
15000         struct lpfc_sli_ring *pring = cq->pring;
15001         struct lpfc_iocbq *cmdiocbq;
15002         unsigned long iflags;
15003
15004         /* Check for response status */
15005         if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15006                 /* If resource errors reported from HBA, reduce queue
15007                  * depth of the SCSI device.
15008                  */
15009                 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15010                      IOSTAT_LOCAL_REJECT)) &&
15011                     ((wcqe->parameter & IOERR_PARAM_MASK) ==
15012                      IOERR_NO_RESOURCES))
15013                         phba->lpfc_rampdown_queue_depth(phba);
15014
15015                 /* Log the cmpl status */
15016                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15017                                 "0373 FCP CQE cmpl: status=x%x: "
15018                                 "CQE: %08x %08x %08x %08x\n",
15019                                 bf_get(lpfc_wcqe_c_status, wcqe),
15020                                 wcqe->word0, wcqe->total_data_placed,
15021                                 wcqe->parameter, wcqe->word3);
15022         }
15023
15024         /* Look up the FCP command IOCB and create pseudo response IOCB */
15025         spin_lock_irqsave(&pring->ring_lock, iflags);
15026         pring->stats.iocb_event++;
15027         cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15028                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15029         spin_unlock_irqrestore(&pring->ring_lock, iflags);
15030         if (unlikely(!cmdiocbq)) {
15031                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15032                                 "0374 FCP complete with no corresponding "
15033                                 "cmdiocb: iotag (%d)\n",
15034                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15035                 return;
15036         }
15037 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15038         cmdiocbq->isr_timestamp = cq->isr_timestamp;
15039 #endif
15040         if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15041                 spin_lock_irqsave(&phba->hbalock, iflags);
15042                 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15043                 spin_unlock_irqrestore(&phba->hbalock, iflags);
15044         }
15045
15046         if (cmdiocbq->cmd_cmpl) {
15047                 /* For FCP the flag is cleared in cmd_cmpl */
15048                 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15049                     cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15050                         spin_lock_irqsave(&phba->hbalock, iflags);
15051                         cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15052                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15053                 }
15054
15055                 /* Pass the cmd_iocb and the wcqe to the upper layer */
15056                 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15057                        sizeof(struct lpfc_wcqe_complete));
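                /* The WCQE was copied into wcqe_cmpl above, so cmdiocbq
                 * doubles as the pseudo response iocb: the same iocbq is
                 * passed as both the command and the response argument.
                 */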
15058                 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15059         } else {
15060                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15061                                 "0375 FCP cmdiocb has no callback "
15062                                 "function, iotag: (%d)\n",
15063                                 bf_get(lpfc_wcqe_c_request_tag, wcqe));
15064         }
15065 }
15066
15067 /**
15068  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15069  * @phba: Pointer to HBA context object.
15070  * @cq: Pointer to completion queue.
15071  * @wcqe: Pointer to work-queue completion queue entry.
15072  *
15073  * This routine handles a fast-path WQ entry consumed event by invoking the
15074  * proper WQ release routine on the matching work queue.
15075  **/
15076 static void
15077 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15078                              struct lpfc_wcqe_release *wcqe)
15079 {
15080         struct lpfc_queue *childwq;
15081         bool wqid_matched = false;
15082         uint16_t hba_wqid;
15083
15084         /* Check for fast-path FCP work queue release */
15085         hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15086         list_for_each_entry(childwq, &cq->child_list, list) {
15087                 if (childwq->queue_id == hba_wqid) {
15088                         lpfc_sli4_wq_release(childwq,
15089                                         bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15090                         if (childwq->q_flag & HBA_NVMET_WQFULL)
15091                                 lpfc_nvmet_wqfull_process(phba, childwq);
15092                         wqid_matched = true;
15093                         break;
15094                 }
15095         }
15096         /* Report warning log message if no match found */
15097         if (!wqid_matched)
15098                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15099                                 "2580 Fast-path wqe consume event carries "
15100                                 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
15101 }
15102
15103 /**
15104  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15105  * @phba: Pointer to HBA context object.
15106  * @cq: Pointer to completion queue.
15107  * @rcqe: Pointer to receive-queue completion queue entry.
15108  *
15109  * This routine processes a receive-queue completion queue entry.
15110  *
15111  * Return: true if work posted to worker thread, otherwise false.
15112  **/
15113 static bool
15114 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15115                             struct lpfc_rcqe *rcqe)
15116 {
15117         bool workposted = false;
15118         struct lpfc_queue *hrq;
15119         struct lpfc_queue *drq;
15120         struct rqb_dmabuf *dma_buf;
15121         struct fc_frame_header *fc_hdr;
15122         struct lpfc_nvmet_tgtport *tgtp;
15123         uint32_t status, rq_id;
15124         unsigned long iflags;
15125         uint32_t fctl, idx;
15126
15127         if ((phba->nvmet_support == 0) ||
15128             (phba->sli4_hba.nvmet_cqset == NULL))
15129                 return workposted;
15130
15131         idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15132         hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15133         drq = phba->sli4_hba.nvmet_mrq_data[idx];
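        /* Each NVMET MRQ index pairs a header RQ (hrq) with a data RQ
         * (drq); the pair is selected by this CQ's offset from the first
         * NVMET CQ in the set.
         */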
15134
15135         /* sanity check on queue memory */
15136         if (unlikely(!hrq) || unlikely(!drq))
15137                 return workposted;
15138
15139         if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15140                 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15141         else
15142                 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15143
15144         if ((phba->nvmet_support == 0) ||
15145             (rq_id != hrq->queue_id))
15146                 return workposted;
15147
15148         status = bf_get(lpfc_rcqe_status, rcqe);
15149         switch (status) {
15150         case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15151                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15152                                 "6126 Receive Frame Truncated!!\n");
15153                 fallthrough;
15154         case FC_STATUS_RQ_SUCCESS:
15155                 spin_lock_irqsave(&phba->hbalock, iflags);
15156                 lpfc_sli4_rq_release(hrq, drq);
15157                 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15158                 if (!dma_buf) {
15159                         hrq->RQ_no_buf_found++;
15160                         spin_unlock_irqrestore(&phba->hbalock, iflags);
15161                         goto out;
15162                 }
15163                 spin_unlock_irqrestore(&phba->hbalock, iflags);
15164                 hrq->RQ_rcv_buf++;
15165                 hrq->RQ_buf_posted--;
15166                 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15167
15168                 /* Just some basic sanity checks on FCP Command frame */
15169                 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15170                         fc_hdr->fh_f_ctl[1] << 8 |
15171                         fc_hdr->fh_f_ctl[2]);
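                /* fh_f_ctl is a 3-byte big-endian field; e.g. the typical
                 * FCP_CMND value {0x29, 0x00, 0x00} yields fctl = 0x290000 =
                 * FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, which
                 * passes the check below.
                 */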
15172                 if (((fctl &
15173                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15174                     (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15175                     (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15176                         goto drop;
15177
15178                 if (fc_hdr->fh_type == FC_TYPE_FCP) {
15179                         dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15180                         lpfc_nvmet_unsol_fcp_event(
15181                                 phba, idx, dma_buf, cq->isr_timestamp,
15182                                 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15183                         return false;
15184                 }
15185 drop:
15186                 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15187                 break;
15188         case FC_STATUS_INSUFF_BUF_FRM_DISC:
15189                 if (phba->nvmet_support) {
15190                         tgtp = phba->targetport->private;
15191                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15192                                         "6401 RQE Error x%x, posted %d err_cnt "
15193                                         "%d: %x %x %x\n",
15194                                         status, hrq->RQ_buf_posted,
15195                                         hrq->RQ_no_posted_buf,
15196                                         atomic_read(&tgtp->rcv_fcp_cmd_in),
15197                                         atomic_read(&tgtp->rcv_fcp_cmd_out),
15198                                         atomic_read(&tgtp->xmt_fcp_release));
15199                 }
15200                 fallthrough;
15201
15202         case FC_STATUS_INSUFF_BUF_NEED_BUF:
15203                 hrq->RQ_no_posted_buf++;
15204                 /* Post more buffers if possible */
15205                 break;
15206         }
15207 out:
15208         return workposted;
15209 }
15210
15211 /**
15212  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15213  * @phba: adapter with cq
15214  * @cq: Pointer to the completion queue.
15215  * @cqe: Pointer to fast-path completion queue entry.
15216  *
15217  * This routine processes a fast-path work queue completion entry from the
15218  * fast-path event queue for an FCP command response completion.
15219  *
15220  * Return: true if work posted to worker thread, otherwise false.
15221  **/
15222 static bool
15223 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15224                          struct lpfc_cqe *cqe)
15225 {
15226         struct lpfc_wcqe_release wcqe;
15227         bool workposted = false;
15228
15229         /* Copy the work queue CQE and convert endian order if needed */
15230         lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15231
15232         /* Check for the different types of WCQE and dispatch accordingly */
15233         switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15234         case CQE_CODE_COMPL_WQE:
15235         case CQE_CODE_NVME_ERSP:
15236                 cq->CQ_wq++;
15237                 /* Process the WQ complete event */
15238                 phba->last_completion_time = jiffies;
15239                 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15240                         lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15241                                 (struct lpfc_wcqe_complete *)&wcqe);
15242                 break;
15243         case CQE_CODE_RELEASE_WQE:
15244                 cq->CQ_release_wqe++;
15245                 /* Process the WQ release event */
15246                 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15247                                 (struct lpfc_wcqe_release *)&wcqe);
15248                 break;
15249         case CQE_CODE_XRI_ABORTED:
15250                 cq->CQ_xri_aborted++;
15251                 /* Process the WQ XRI abort event */
15252                 phba->last_completion_time = jiffies;
15253                 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15254                                 (struct sli4_wcqe_xri_aborted *)&wcqe);
15255                 break;
15256         case CQE_CODE_RECEIVE_V1:
15257         case CQE_CODE_RECEIVE:
15258                 phba->last_completion_time = jiffies;
15259                 if (cq->subtype == LPFC_NVMET) {
15260                         workposted = lpfc_sli4_nvmet_handle_rcqe(
15261                                 phba, cq, (struct lpfc_rcqe *)&wcqe);
15262                 }
15263                 break;
15264         default:
15265                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15266                                 "0144 Not a valid CQE code: x%x\n",
15267                                 bf_get(lpfc_wcqe_c_code, &wcqe));
15268                 break;
15269         }
15270         return workposted;
15271 }
15272
15273 /**
15274  * lpfc_sli4_sched_cq_work - Schedules cq work
15275  * @phba: Pointer to HBA context object.
15276  * @cq: Pointer to CQ
15277  * @cqid: CQ ID
15278  *
15279  * This routine checks the poll mode of the CQ corresponding to
15280  * cq->chann, then either schedules a softirq or queue_work to complete
15281  * cq work.
15282  *
15283  * The queue_work path is taken in NVMET mode, or if poll_mode is
15284  * LPFC_QUEUE_WORK.  Otherwise, the softirq path is taken.
15285  *
15286  **/
15287 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15288                                     struct lpfc_queue *cq, uint16_t cqid)
15289 {
15290         int ret = 0;
15291
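        /* A kdump kernel typically boots with a single CPU online, so the
         * work below is queued unbound rather than pinned to cq->chann.
         */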
15292         switch (cq->poll_mode) {
15293         case LPFC_IRQ_POLL:
15294                 /* CGN mgmt is mutually exclusive with softirq processing */
15295                 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15296                         irq_poll_sched(&cq->iop);
15297                         break;
15298                 }
15299                 fallthrough;
15300         case LPFC_QUEUE_WORK:
15301         default:
15302                 if (is_kdump_kernel())
15303                         ret = queue_work(phba->wq, &cq->irqwork);
15304                 else
15305                         ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15306                 if (!ret)
15307                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15308                                         "0383 Cannot schedule queue work "
15309                                         "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15310                                         cqid, cq->queue_id,
15311                                         raw_smp_processor_id());
15312         }
15313 }
15314
15315 /**
15316  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15317  * @phba: Pointer to HBA context object.
15318  * @eq: Pointer to the queue structure.
15319  * @eqe: Pointer to fast-path event queue entry.
15320  *
15321  * This routine processes an event queue entry from the fast-path event queue.
15322  * It checks the MajorCode and MinorCode to determine whether this is a
15323  * completion event on a completion queue; if not, an error is logged and the
15324  * routine just returns. Otherwise, it looks up the corresponding completion
15325  * queue, processes all the entries on that completion queue, rearms the
15326  * completion queue, and then returns.
15327  **/
15328 static void
15329 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15330                          struct lpfc_eqe *eqe)
15331 {
15332         struct lpfc_queue *cq = NULL;
15333         uint32_t qidx = eq->hdwq;
15334         uint16_t cqid, id;
15335
15336         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15337                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15338                                 "0366 Not a valid completion "
15339                                 "event: majorcode=x%x, minorcode=x%x\n",
15340                                 bf_get_le32(lpfc_eqe_major_code, eqe),
15341                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
15342                 return;
15343         }
15344
15345         /* Get the reference to the corresponding CQ */
15346         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15347
15348         /* Use the fast lookup method first */
15349         if (cqid <= phba->sli4_hba.cq_max) {
15350                 cq = phba->sli4_hba.cq_lookup[cqid];
15351                 if (cq)
15352                         goto work_cq;
15353         }
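        /* The cq_lookup[] fast path above is a flat array indexed by CQ id
         * (sized to cq_max and populated when the queues are set up), so
         * most EQEs resolve to their CQ without walking any queue lists.
         */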
15354
15355         /* Next check for NVMET completion */
15356         if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15357                 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15358                 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15359                         /* Process NVMET unsol rcv */
15360                         cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15361                         goto process_cq;
15362                 }
15363         }
15364
15365         if (phba->sli4_hba.nvmels_cq &&
15366             (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15367                 /* Process NVME unsol rcv */
15368                 cq = phba->sli4_hba.nvmels_cq;
15369         }
15370
15371         /* Otherwise this is a slow-path event */
15372         if (cq == NULL) {
15373                 lpfc_sli4_sp_handle_eqe(phba, eqe,
15374                                         phba->sli4_hba.hdwq[qidx].hba_eq);
15375                 return;
15376         }
15377
15378 process_cq:
15379         if (unlikely(cqid != cq->queue_id)) {
15380                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15381                                 "0368 Mismatched fast-path completion "
15382                                 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15383                                 cqid, cq->queue_id);
15384                 return;
15385         }
15386
15387 work_cq:
15388 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15389         if (phba->ktime_on)
15390                 cq->isr_timestamp = ktime_get_ns();
15391         else
15392                 cq->isr_timestamp = 0;
15393 #endif
15394         lpfc_sli4_sched_cq_work(phba, cq, cqid);
15395 }
15396
15397 /**
15398  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
15399  * @cq: Pointer to CQ to be processed
15400  * @poll_mode: Enum lpfc_poll_mode to determine poll mode
15401  *
15402  * This routine calls the cq processing routine with the handler for
15403  * fast path CQEs.
15404  *
15405  * The CQ routine returns two values: the first is the calling status,
15406  * which indicates whether work was queued to the background discovery
15407  * thread; if true, the routine should wake up the discovery thread.
15408  * The second is the delay parameter: if non-zero, rather than rearming
15409  * the CQ and taking yet another interrupt, the CQ handler should be
15410  * requeued so that it is processed in a subsequent polling action. The
15411  * value of the delay indicates when to reschedule it.
15412  **/
15413 static void
15414 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15415                            enum lpfc_poll_mode poll_mode)
15416 {
15417         struct lpfc_hba *phba = cq->phba;
15418         unsigned long delay;
15419         bool workposted = false;
15420         int ret = 0;
15421
15422         /* process and rearm the CQ */
15423         workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15424                                              &delay, poll_mode);
15425
15426         if (delay) {
15427                 if (is_kdump_kernel())
15428                         ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15429                                                 delay);
15430                 else
15431                         ret = queue_delayed_work_on(cq->chann, phba->wq,
15432                                                 &cq->sched_irqwork, delay);
15433                 if (!ret)
15434                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15435                                         "0367 Cannot schedule queue work "
15436                                         "for cqid=%d on CPU %d\n",
15437                                         cq->queue_id, cq->chann);
15438         }
15439
15440         /* wake up worker thread if there is work to be done */
15441         if (workposted)
15442                 lpfc_worker_wake_up(phba);
15443 }
15444
15445 /**
15446  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15447  *   interrupt
15448  * @work: pointer to work element
15449  *
15450  * Translates from the work element to the CQ and calls the fast-path handler.
15451  **/
15452 static void
15453 lpfc_sli4_hba_process_cq(struct work_struct *work)
15454 {
15455         struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15456
15457         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15458 }
15459
15460 /**
15461  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15462  * @work: pointer to work element
15463  *
15464  * Translates from the work element to the CQ and calls the fast-path handler.
15465  **/
15466 static void
15467 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15468 {
15469         struct lpfc_queue *cq = container_of(to_delayed_work(work),
15470                                         struct lpfc_queue, sched_irqwork);
15471
15472         __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15473 }
15474
15475 /**
15476  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15477  * @irq: Interrupt number.
15478  * @dev_id: The device context pointer.
15479  *
15480  * This function is directly called from the PCI layer as an interrupt
15481  * service routine when device with SLI-4 interface spec is enabled with
15482  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15483  * ring event in the HBA. However, when the device is enabled with either
15484  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15485  * device-level interrupt handler. When the PCI slot is in error recovery
15486  * or the HBA is undergoing initialization, the interrupt handler will not
15487  * process the interrupt. SCSI FCP fast-path ring events are handled in
15488  * the interrupt context. This function is called without any lock held.
15489  * It gets the hbalock to access and update SLI data structures. Note that
15490  * the FCP EQs map one-to-one to FCP CQs, so the FCP EQ index is equal
15491  * to the FCP CQ index.
15492  *
15493  * The link attention and ELS ring attention events are handled
15494  * by the worker thread. The interrupt handler signals the worker thread
15495  * and returns for these events. This function is called without any lock
15496  * held. It gets the hbalock to access and update SLI data structures.
15497  *
15498  * This function returns IRQ_HANDLED when interrupt is handled else it
15499  * returns IRQ_NONE.
15500  **/
15501 irqreturn_t
15502 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15503 {
15504         struct lpfc_hba *phba;
15505         struct lpfc_hba_eq_hdl *hba_eq_hdl;
15506         struct lpfc_queue *fpeq;
15507         unsigned long iflag;
15508         int ecount = 0;
15509         int hba_eqidx;
15510         struct lpfc_eq_intr_info *eqi;
15511
15512         /* Get the driver's phba structure from the dev_id */
15513         hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15514         phba = hba_eq_hdl->phba;
15515         hba_eqidx = hba_eq_hdl->idx;
15516
15517         if (unlikely(!phba))
15518                 return IRQ_NONE;
15519         if (unlikely(!phba->sli4_hba.hdwq))
15520                 return IRQ_NONE;
15521
15522         /* Get to the EQ struct associated with this vector */
15523         fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15524         if (unlikely(!fpeq))
15525                 return IRQ_NONE;
15526
15527         /* Check device state for handling interrupt */
15528         if (unlikely(lpfc_intr_state_check(phba))) {
15529                 /* Check again for link_state with lock held */
15530                 spin_lock_irqsave(&phba->hbalock, iflag);
15531                 if (phba->link_state < LPFC_LINK_DOWN)
15532                         /* Flush, clear interrupt, and rearm the EQ */
15533                         lpfc_sli4_eqcq_flush(phba, fpeq);
15534                 spin_unlock_irqrestore(&phba->hbalock, iflag);
15535                 return IRQ_NONE;
15536         }
15537
15538         eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15539         eqi->icnt++;
15540
15541         fpeq->last_cpu = raw_smp_processor_id();
15542
15543         if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15544             fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15545             phba->cfg_auto_imax &&
15546             fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15547             phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15548                 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
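        /* The block above pushes this EQ to the maximum coalescing delay
         * once the per-CPU ISR count crosses LPFC_EQD_ISR_TRIGGER, but only
         * when auto_imax is enabled, the EQ is not already at the maximum
         * delay, and the port supports the EQ delay register.
         */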
15549
15550         /* process and rearm the EQ */
15551         ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15552
15553         if (unlikely(ecount == 0)) {
15554                 fpeq->EQ_no_entry++;
15555                 if (phba->intr_type == MSIX)
15556                         /* MSI-X interrupts are not shared, so warn on an empty EQ */
15557                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15558                                         "0358 MSI-X interrupt with no EQE\n");
15559                 else
15560                         /* Non-MSI-X interrupts may be shared; treat an empty EQ as not ours */
15561                         return IRQ_NONE;
15562         }
15563
15564         return IRQ_HANDLED;
15565 } /* lpfc_sli4_hba_intr_handler */
15566
15567 /**
15568  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15569  * @irq: Interrupt number.
15570  * @dev_id: The device context pointer.
15571  *
15572  * This function is the device-level interrupt handler to device with SLI-4
15573  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15574  * interrupt mode is enabled and there is an event in the HBA which requires
15575  * driver attention. This function invokes the slow-path interrupt attention
15576  * handling function and fast-path interrupt attention handling function in
15577  * turn to process the relevant HBA attention events. This function is called
15578  * without any lock held. It gets the hbalock to access and update SLI data
15579  * structures.
15580  *
15581  * This function returns IRQ_HANDLED when interrupt is handled, else it
15582  * returns IRQ_NONE.
15583  **/
15584 irqreturn_t
15585 lpfc_sli4_intr_handler(int irq, void *dev_id)
15586 {
15587         struct lpfc_hba  *phba;
15588         irqreturn_t hba_irq_rc;
15589         bool hba_handled = false;
15590         int qidx;
15591
15592         /* Get the driver's phba structure from the dev_id */
15593         phba = (struct lpfc_hba *)dev_id;
15594
15595         if (unlikely(!phba))
15596                 return IRQ_NONE;
15597
15598         /*
15599          * Invoke fast-path host attention interrupt handling as appropriate.
15600          */
15601         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15602                 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15603                                         &phba->sli4_hba.hba_eq_hdl[qidx]);
15604                 if (hba_irq_rc == IRQ_HANDLED)
15605                         hba_handled |= true;
15606         }
15607
15608         return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15609 } /* lpfc_sli4_intr_handler */
15610
15611 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15612 {
15613         struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15614         struct lpfc_queue *eq;
15615
15616         rcu_read_lock();
15617
15618         list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15619                 lpfc_sli4_poll_eq(eq);
15620         if (!list_empty(&phba->poll_list))
15621                 mod_timer(&phba->cpuhp_poll_timer,
15622                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15623
15624         rcu_read_unlock();
15625 }
15626
15627 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15628 {
15629         struct lpfc_hba *phba = eq->phba;
15630
15631         /* kickstart slowpath processing if needed */
15632         if (list_empty(&phba->poll_list))
15633                 mod_timer(&phba->cpuhp_poll_timer,
15634                           jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15635
15636         list_add_rcu(&eq->_poll_list, &phba->poll_list);
15637         synchronize_rcu();
15638 }
15639
15640 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15641 {
15642         struct lpfc_hba *phba = eq->phba;
15643
15644         /* Disable slowpath processing for this eq.  Kick-start the eq
15645          * by re-arming it ASAP.
15646          */
15647         list_del_rcu(&eq->_poll_list);
15648         synchronize_rcu();
15649
15650         if (list_empty(&phba->poll_list))
15651                 del_timer_sync(&phba->cpuhp_poll_timer);
15652 }
15653
15654 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15655 {
15656         struct lpfc_queue *eq, *next;
15657
15658         list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15659                 list_del(&eq->_poll_list);
15660
15661         INIT_LIST_HEAD(&phba->poll_list);
15662         synchronize_rcu();
15663 }
15664
15665 static inline void
15666 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15667 {
15668         if (mode == eq->mode)
15669                 return;
15670         /*
15671          * Currently this function is only called during a hotplug
15672          * event and the cpu on which this function is executing
15673          * is going offline.  By now the hotplug has instructed
15674          * the scheduler to remove this cpu from the cpu active mask,
15675          * so we don't need to worry about being put aside by the
15676          * scheduler for a high-priority process.  Yes, interrupts
15677          * could still come in, but they are known to retire ASAP.
15678          */
15679
15680         /* Disable polling in the fastpath */
15681         WRITE_ONCE(eq->mode, mode);
15682         /* flush out the store buffer */
15683         smp_wmb();
15684
15685         /*
15686          * Add this eq to the polling list and start polling. For
15687          * a grace period both the interrupt handler and poller will
15688          * try to process the eq _but_ that's fine.  We have a
15689          * synchronization mechanism in place (queue_claimed) to
15690          * deal with it.  This is just a draining phase for the
15691          * interrupt handler (not eq's) as we have guaranteed through
15692          * the barrier that all the CPUs have seen the new CQ_POLLED
15693          * state, which will effectively disable the REARMING of
15694          * the EQ.  The whole idea is that eq's die off eventually as
15695          * we are not rearming EQ's anymore.
15696          */
15697         mode ? lpfc_sli4_add_to_poll_list(eq) :
15698                lpfc_sli4_remove_from_poll_list(eq);
15699 }
15700
15701 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15702 {
15703         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15704 }
15705
15706 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15707 {
15708         struct lpfc_hba *phba = eq->phba;
15709
15710         __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15711
15712         /* Kick-start the pending io's in h/w.
15713          * Once we switch back to interrupt processing on an eq
15714          * the io path completion will only arm eq's when it
15715          * receives a completion.  But since eq's are in a
15716          * disarmed state it doesn't receive a completion.  This
15717          * creates a deadlock scenario.
15718          */
15719         phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15720 }
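/*
 * The start/stop helpers above are intended for the CPU hotplug path (as
 * understood from the hotplug support elsewhere in the driver): when the
 * CPU owning an EQ goes offline, lpfc_sli4_start_polling() lets the
 * heartbeat timer service that EQ; lpfc_sli4_stop_polling() restores
 * interrupt-driven processing and re-arms the EQ once the CPU returns.
 */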
15721
15722 /**
15723  * lpfc_sli4_queue_free - free a queue structure and associated memory
15724  * @queue: The queue structure to free.
15725  *
15726  * This function frees a queue structure and the DMAable memory used for
15727  * the host resident queue. This function must be called after destroying the
15728  * queue on the HBA.
15729  **/
15730 void
15731 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15732 {
15733         struct lpfc_dmabuf *dmabuf;
15734
15735         if (!queue)
15736                 return;
15737
15738         if (!list_empty(&queue->wq_list))
15739                 list_del(&queue->wq_list);
15740
15741         while (!list_empty(&queue->page_list)) {
15742                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15743                                  list);
15744                 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15745                                   dmabuf->virt, dmabuf->phys);
15746                 kfree(dmabuf);
15747         }
15748         if (queue->rqbp) {
15749                 lpfc_free_rq_buffer(queue->phba, queue);
15750                 kfree(queue->rqbp);
15751         }
15752
15753         if (!list_empty(&queue->cpu_list))
15754                 list_del(&queue->cpu_list);
15755
15756         kfree(queue);
15757         return;
15758 }
15759
15760 /**
15761  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15762  * @phba: The HBA that this queue is being created on.
15763  * @page_size: The size of a queue page
15764  * @entry_size: The size of each queue entry for this queue.
15765  * @entry_count: The number of entries that this queue will handle.
15766  * @cpu: The cpu that will primarily utilize this queue.
15767  *
15768  * This function allocates a queue structure and the DMAable memory used for
15769  * the host resident queue. This function must be called before creating the
15770  * queue on the HBA.
15771  **/
15772 struct lpfc_queue *
15773 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15774                       uint32_t entry_size, uint32_t entry_count, int cpu)
15775 {
15776         struct lpfc_queue *queue;
15777         struct lpfc_dmabuf *dmabuf;
15778         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15779         uint16_t x, pgcnt;
15780
15781         if (!phba->sli4_hba.pc_sli4_params.supported)
15782                 hw_page_size = page_size;
15783
15784         pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
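        /* e.g. a queue with 64-byte entries and entry_count 1024 needs
         * ALIGN(64 * 1024, 4096) / 4096 = 16 pages when the hardware page
         * size is 4KB.
         */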
15785
15786         /* If needed, adjust page count to match the max the adapter supports */
15787         if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15788                 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15789
15790         queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15791                              GFP_KERNEL, cpu_to_node(cpu));
15792         if (!queue)
15793                 return NULL;
15794
15795         INIT_LIST_HEAD(&queue->list);
15796         INIT_LIST_HEAD(&queue->_poll_list);
15797         INIT_LIST_HEAD(&queue->wq_list);
15798         INIT_LIST_HEAD(&queue->wqfull_list);
15799         INIT_LIST_HEAD(&queue->page_list);
15800         INIT_LIST_HEAD(&queue->child_list);
15801         INIT_LIST_HEAD(&queue->cpu_list);
15802
15803         /* Set queue parameters now.  If the system cannot provide memory
15804          * resources, the free routine needs to know what was allocated.
15805          */
15806         queue->page_count = pgcnt;
15807         queue->q_pgs = (void **)&queue[1];
15808         queue->entry_cnt_per_pg = hw_page_size / entry_size;
15809         queue->entry_size = entry_size;
15810         queue->entry_count = entry_count;
15811         queue->page_size = hw_page_size;
15812         queue->phba = phba;
15813
15814         for (x = 0; x < queue->page_count; x++) {
15815                 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15816                                       dev_to_node(&phba->pcidev->dev));
15817                 if (!dmabuf)
15818                         goto out_fail;
15819                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15820                                                   hw_page_size, &dmabuf->phys,
15821                                                   GFP_KERNEL);
15822                 if (!dmabuf->virt) {
15823                         kfree(dmabuf);
15824                         goto out_fail;
15825                 }
15826                 dmabuf->buffer_tag = x;
15827                 list_add_tail(&dmabuf->list, &queue->page_list);
15828                 /* use lpfc_sli4_qe to index a particular entry in this page */
15829                 queue->q_pgs[x] = dmabuf->virt;
15830         }
15831         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15832         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15833         INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15834         INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15835
15836         /* notify_interval will be set during q creation */
15837
15838         return queue;
15839 out_fail:
15840         lpfc_sli4_queue_free(queue);
15841         return NULL;
15842 }
15843
15844 /**
15845  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15846  * @phba: HBA structure that indicates port to create a queue on.
15847  * @pci_barset: PCI BAR set flag.
15848  *
15849  * This function returns the host memory address to which the specified
15850  * PCI BAR set has been iomapped, if such a mapping exists. The returned
15851  * host memory address can be NULL.
15852  */
15853 static void __iomem *
15854 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15855 {
15856         if (!phba->pcidev)
15857                 return NULL;
15858
15859         switch (pci_barset) {
15860         case WQ_PCI_BAR_0_AND_1:
15861                 return phba->pci_bar0_memmap_p;
15862         case WQ_PCI_BAR_2_AND_3:
15863                 return phba->pci_bar2_memmap_p;
15864         case WQ_PCI_BAR_4_AND_5:
15865                 return phba->pci_bar4_memmap_p;
15866         default:
15867                 break;
15868         }
15869         return NULL;
15870 }
15871
15872 /**
15873  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15874  * @phba: HBA structure that EQs are on.
15875  * @startq: The starting EQ index to modify
15876  * @numq: The number of EQs (consecutive indexes) to modify
15877  * @usdelay: amount of delay
15878  *
15879  * This function revises the EQ delay on 1 or more EQs. The EQ delay
15880  * is set either by writing to a register (if supported by the SLI Port)
15881  * or by mailbox command. The mailbox command allows several EQs to be
15882  * updated at once.
15883  *
15884  * The @phba struct is used to send a mailbox command to the HBA. The @startq
15885  * is used to get the starting EQ index to change. The @numq value is
15886  * used to specify how many consecutive EQ indexes, starting at EQ index,
15887  * are to be changed. This function is synchronous and will wait for any
15888  * mailbox commands to finish before returning.
15889  *
15890  * This function returns void. If memory allocation or the mailbox
15891  * command fails, the error is logged and the EQ delay is left
15892  * unchanged. Note: on a mailbox failure, some EQs may already
15893  * have had their delay multiplier changed.
15894  **/
15895 void
15896 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15897                          uint32_t numq, uint32_t usdelay)
15898 {
15899         struct lpfc_mbx_modify_eq_delay *eq_delay;
15900         LPFC_MBOXQ_t *mbox;
15901         struct lpfc_queue *eq;
15902         int cnt = 0, rc, length;
15903         uint32_t shdr_status, shdr_add_status;
15904         uint32_t dmult;
15905         int qidx;
15906         union lpfc_sli4_cfg_shdr *shdr;
15907
15908         if (startq >= phba->cfg_irq_chann)
15909                 return;
15910
15911         if (usdelay > 0xFFFF) {
15912                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15913                                 "6429 usdelay %d too large. Scaled down to "
15914                                 "0xFFFF.\n", usdelay);
15915                 usdelay = 0xFFFF;
15916         }
15917
15918         /* set values by EQ_DELAY register if supported */
15919         if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15920                 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15921                         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15922                         if (!eq)
15923                                 continue;
15924
15925                         lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15926
15927                         if (++cnt >= numq)
15928                                 break;
15929                 }
15930                 return;
15931         }
15932
15933         /* Otherwise, set values by mailbox cmd */
15934
15935         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15936         if (!mbox) {
15937                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15938                                 "6428 Failed allocating mailbox cmd buffer."
15939                                 " EQ delay was not set.\n");
15940                 return;
15941         }
15942         length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15943                   sizeof(struct lpfc_sli4_cfg_mhdr));
15944         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15945                          LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15946                          length, LPFC_SLI4_MBX_EMBED);
15947         eq_delay = &mbox->u.mqe.un.eq_delay;
15948
15949         /* Calculate the delay multiplier from the maximum interrupts per second */
15950         dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15951         if (dmult)
15952                 dmult--;
15953         if (dmult > LPFC_DMULT_MAX)
15954                 dmult = LPFC_DMULT_MAX;
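        /* Worked example (assuming LPFC_DMULT_CONST == 651042 and
         * LPFC_SEC_TO_USEC == 1000000): usdelay = 16 gives
         * dmult = (16 * 651042) / 1000000 = 10, decremented to 9,
         * well under LPFC_DMULT_MAX.
         */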
15955
15956         for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15957                 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15958                 if (!eq)
15959                         continue;
15960                 eq->q_mode = usdelay;
15961                 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15962                 eq_delay->u.request.eq[cnt].phase = 0;
15963                 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15964
15965                 if (++cnt >= numq)
15966                         break;
15967         }
15968         eq_delay->u.request.num_eq = cnt;
15969
15970         mbox->vport = phba->pport;
15971         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15972         mbox->ctx_ndlp = NULL;
15973         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15974         shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15975         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15976         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15977         if (shdr_status || shdr_add_status || rc) {
15978                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15979                                 "2512 MODIFY_EQ_DELAY mailbox failed with "
15980                                 "status x%x add_status x%x, mbx status x%x\n",
15981                                 shdr_status, shdr_add_status, rc);
15982         }
15983         mempool_free(mbox, phba->mbox_mem_pool);
15984         return;
15985 }
15986
15987 /**
15988  * lpfc_eq_create - Create an Event Queue on the HBA
15989  * @phba: HBA structure that indicates port to create a queue on.
15990  * @eq: The queue structure to use to create the event queue.
15991  * @imax: The maximum interrupt per second limit.
15992  *
15993  * This function creates an event queue, as detailed in @eq, on a port,
15994  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
15995  *
15996  * The @phba struct is used to send a mailbox command to the HBA. The @eq struct
15997  * is used to get the entry count and entry size that are necessary to
15998  * determine the number of pages to allocate and use for this queue. This
15999  * function will send the EQ_CREATE mailbox command to the HBA to set up the
16000  * event queue. This function is synchronous and will wait for the mailbox
16001  * command to finish before continuing.
16002  *
16003  * On success this function will return a zero. If unable to allocate enough
16004  * memory this function will return -ENOMEM. If the queue create mailbox command
16005  * fails this function will return -ENXIO.
16006  **/
16007 int
16008 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16009 {
16010         struct lpfc_mbx_eq_create *eq_create;
16011         LPFC_MBOXQ_t *mbox;
16012         int rc, length, status = 0;
16013         struct lpfc_dmabuf *dmabuf;
16014         uint32_t shdr_status, shdr_add_status;
16015         union lpfc_sli4_cfg_shdr *shdr;
16016         uint16_t dmult;
16017         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16018
16019         /* sanity check on queue memory */
16020         if (!eq)
16021                 return -ENODEV;
16022         if (!phba->sli4_hba.pc_sli4_params.supported)
16023                 hw_page_size = SLI4_PAGE_SIZE;
16024
16025         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16026         if (!mbox)
16027                 return -ENOMEM;
16028         length = (sizeof(struct lpfc_mbx_eq_create) -
16029                   sizeof(struct lpfc_sli4_cfg_mhdr));
16030         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16031                          LPFC_MBOX_OPCODE_EQ_CREATE,
16032                          length, LPFC_SLI4_MBX_EMBED);
16033         eq_create = &mbox->u.mqe.un.eq_create;
16034         shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16035         bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16036                eq->page_count);
16037         bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16038                LPFC_EQE_SIZE);
16039         bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16040
16041         /* Use version 2 of CREATE_EQ if eqav is set */
16042         if (phba->sli4_hba.pc_sli4_params.eqav) {
16043                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16044                        LPFC_Q_CREATE_VERSION_2);
16045                 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16046                        phba->sli4_hba.pc_sli4_params.eqav);
16047         }
16048
16049         /* don't set up the delay multiplier using EQ_CREATE */
16050         dmult = 0;
16051         bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16052                dmult);
16053         switch (eq->entry_count) {
16054         default:
16055                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16056                                 "0360 Unsupported EQ count. (%d)\n",
16057                                 eq->entry_count);
16058                 if (eq->entry_count < 256) {
16059                         status = -EINVAL;
16060                         goto out;
16061                 }
16062                 fallthrough;    /* otherwise default to smallest count */
16063         case 256:
16064                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16065                        LPFC_EQ_CNT_256);
16066                 break;
16067         case 512:
16068                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16069                        LPFC_EQ_CNT_512);
16070                 break;
16071         case 1024:
16072                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16073                        LPFC_EQ_CNT_1024);
16074                 break;
16075         case 2048:
16076                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16077                        LPFC_EQ_CNT_2048);
16078                 break;
16079         case 4096:
16080                 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16081                        LPFC_EQ_CNT_4096);
16082                 break;
16083         }
16084         list_for_each_entry(dmabuf, &eq->page_list, list) {
16085                 memset(dmabuf->virt, 0, hw_page_size);
16086                 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16087                                         putPaddrLow(dmabuf->phys);
16088                 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16089                                         putPaddrHigh(dmabuf->phys);
16090         }
16091         mbox->vport = phba->pport;
16092         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16093         mbox->ctx_buf = NULL;
16094         mbox->ctx_ndlp = NULL;
16095         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16096         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16097         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16098         if (shdr_status || shdr_add_status || rc) {
16099                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16100                                 "2500 EQ_CREATE mailbox failed with "
16101                                 "status x%x add_status x%x, mbx status x%x\n",
16102                                 shdr_status, shdr_add_status, rc);
16103                 status = -ENXIO;
16104         }
16105         eq->type = LPFC_EQ;
16106         eq->subtype = LPFC_NONE;
16107         eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16108         if (eq->queue_id == 0xFFFF)
16109                 status = -ENXIO;
16110         eq->host_index = 0;
16111         eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16112         eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16113 out:
16114         mempool_free(mbox, phba->mbox_mem_pool);
16115         return status;
16116 }
16117
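/*
 * A minimal usage sketch for the allocate/create pair above. It is
 * illustrative rather than a verbatim call site; LPFC_DEFAULT_PAGE_SIZE,
 * eq_esize/eq_ecount, and cfg_fcp_imax are the sizing parameters the
 * driver uses elsewhere:
 *
 *      struct lpfc_queue *eq;
 *
 *      eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *                                 phba->sli4_hba.eq_esize,
 *                                 phba->sli4_hba.eq_ecount, cpu);
 *      if (!eq || lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
 *              lpfc_sli4_queue_free(eq);       // safe to call on NULL
 */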
16118 static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
16119 {
16120         struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
16121
16122         __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
16123
16124         return 1;
16125 }
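/*
 * The irq_poll handler above reports one unit of work against the poll
 * weight on each invocation; rearming the CQ and calling
 * irq_poll_complete() is expected to happen inside the shared CQ
 * processing path (__lpfc_sli4_process_cq) rather than here.
 */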
16126
16127 /**
16128  * lpfc_cq_create - Create a Completion Queue on the HBA
16129  * @phba: HBA structure that indicates port to create a queue on.
16130  * @cq: The queue structure to use to create the completion queue.
16131  * @eq: The event queue to bind this completion queue to.
16132  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16133  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16134  *
16135  * This function creates a completion queue, as detailed in @cq, on a port,
16136  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16137  *
16138  * The @phba struct is used to send a mailbox command to the HBA. The @cq struct
16139  * is used to get the entry count and entry size that are necessary to
16140  * determine the number of pages to allocate and use for this queue. The @eq
16141  * is used to indicate which event queue to bind this completion queue to. This
16142  * function will send the CQ_CREATE mailbox command to the HBA to set up the
16143  * completion queue. This function is synchronous and will wait for the mailbox
16144  * command to finish before continuing.
16145  *
16146  * On success this function will return a zero. If unable to allocate enough
16147  * memory this function will return -ENOMEM. If the queue create mailbox command
16148  * fails this function will return -ENXIO.
16149  **/
16150 int
16151 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16152                struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16153 {
16154         struct lpfc_mbx_cq_create *cq_create;
16155         struct lpfc_dmabuf *dmabuf;
16156         LPFC_MBOXQ_t *mbox;
16157         int rc, length, status = 0;
16158         uint32_t shdr_status, shdr_add_status;
16159         union lpfc_sli4_cfg_shdr *shdr;
16160
16161         /* sanity check on queue memory */
16162         if (!cq || !eq)
16163                 return -ENODEV;
16164
16165         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16166         if (!mbox)
16167                 return -ENOMEM;
16168         length = (sizeof(struct lpfc_mbx_cq_create) -
16169                   sizeof(struct lpfc_sli4_cfg_mhdr));
16170         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16171                          LPFC_MBOX_OPCODE_CQ_CREATE,
16172                          length, LPFC_SLI4_MBX_EMBED);
16173         cq_create = &mbox->u.mqe.un.cq_create;
16174         shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16175         bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16176                     cq->page_count);
16177         bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16178         bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16179         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16180                phba->sli4_hba.pc_sli4_params.cqv);
16181         if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16182                 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16183                        (cq->page_size / SLI4_PAGE_SIZE));
16184                 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16185                        eq->queue_id);
16186                 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16187                        phba->sli4_hba.pc_sli4_params.cqav);
16188         } else {
16189                 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16190                        eq->queue_id);
16191         }
16192         switch (cq->entry_count) {
16193         case 2048:
16194         case 4096:
16195                 if (phba->sli4_hba.pc_sli4_params.cqv ==
16196                     LPFC_Q_CREATE_VERSION_2) {
16197                         cq_create->u.request.context.lpfc_cq_context_count =
16198                                 cq->entry_count;
16199                         bf_set(lpfc_cq_context_count,
16200                                &cq_create->u.request.context,
16201                                LPFC_CQ_CNT_WORD7);
16202                         break;
16203                 }
16204                 fallthrough;
16205         default:
16206                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16207                                 "0361 Unsupported CQ count: "
16208                                 "entry cnt %d sz %d pg cnt %d\n",
16209                                 cq->entry_count, cq->entry_size,
16210                                 cq->page_count);
16211                 if (cq->entry_count < 256) {
16212                         status = -EINVAL;
16213                         goto out;
16214                 }
16215                 fallthrough;    /* otherwise default to smallest count */
16216         case 256:
16217                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16218                        LPFC_CQ_CNT_256);
16219                 break;
16220         case 512:
16221                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16222                        LPFC_CQ_CNT_512);
16223                 break;
16224         case 1024:
16225                 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16226                        LPFC_CQ_CNT_1024);
16227                 break;
16228         }
16229         list_for_each_entry(dmabuf, &cq->page_list, list) {
16230                 memset(dmabuf->virt, 0, cq->page_size);
16231                 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16232                                         putPaddrLow(dmabuf->phys);
16233                 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16234                                         putPaddrHigh(dmabuf->phys);
16235         }
16236         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16237
16238         /* The IOCTL status is embedded in the mailbox subheader. */
16239         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16240         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16241         if (shdr_status || shdr_add_status || rc) {
16242                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16243                                 "2501 CQ_CREATE mailbox failed with "
16244                                 "status x%x add_status x%x, mbx status x%x\n",
16245                                 shdr_status, shdr_add_status, rc);
16246                 status = -ENXIO;
16247                 goto out;
16248         }
16249         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16250         if (cq->queue_id == 0xFFFF) {
16251                 status = -ENXIO;
16252                 goto out;
16253         }
16254         /* link the cq onto the parent eq child list */
16255         list_add_tail(&cq->list, &eq->child_list);
16256         /* Set up completion queue's type and subtype */
16257         cq->type = type;
16258         cq->subtype = subtype;
16259         cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16260         cq->assoc_qid = eq->queue_id;
16261         cq->assoc_qp = eq;
16262         cq->host_index = 0;
16263         cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16264         cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16265
16266         if (cq->queue_id > phba->sli4_hba.cq_max)
16267                 phba->sli4_hba.cq_max = cq->queue_id;
16268
16269         irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
16270 out:
16271         mempool_free(mbox, phba->mbox_mem_pool);
16272         return status;
16273 }
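
/*
 * Editor's note: the block below is an illustrative sketch, not driver
 * code.  It shows the assumed calling convention of lpfc_cq_create(),
 * mirroring how lpfc_sli4_queue_setup() binds the slow-path ELS CQ to
 * the first hardware queue's EQ.  The helper name is hypothetical and
 * both queues are assumed to have been allocated already with
 * lpfc_sli4_queue_alloc().
 */
static int __maybe_unused
lpfc_example_els_cq_create(struct lpfc_hba *phba)
{
	int rc;

	/* LPFC_WCQ is the completion queue type, LPFC_ELS the subtype */
	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
			    phba->sli4_hba.hdwq[0].hba_eq,
			    LPFC_WCQ, LPFC_ELS);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"example: ELS CQ create failed, rc %d\n", rc);
	return rc;
}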
16274
16275 /**
16276  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16277  * @phba: HBA structure that indicates port to create a queue on.
16278  * @cqp: The queue structure array to use to create the completion queues.
16279  * @hdwq: The hardware queue array with the EQ to bind completion queues to.
16280  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc.).
16281  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc.).
16282  *
16283  * This function creates a set of completion queues to support MRQ, as
16284  * detailed in @cqp, on a port described by @phba, by sending a
16285  * CREATE_CQ_SET mailbox command to the HBA.
16286  *
16287  * The @phba struct is used to send a mailbox command to the HBA. Each queue
16288  * in @cqp supplies the entry count and entry size that are necessary to
16289  * determine the number of pages to allocate and use for that queue. The EQs
16290  * in @hdwq indicate which event queue to bind each completion queue to. This
16291  * function sends the CREATE_CQ_SET mailbox command to the HBA in polled mode,
16292  * so it is synchronous and waits for the mailbox command to finish before
16293  * returning.
16294  *
16295  * On success this function returns zero. If unable to allocate enough
16296  * memory this function returns -ENOMEM. If the queue create mailbox command
16297  * fails this function returns -ENXIO.
16298  **/
16299 int
16300 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16301                    struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16302                    uint32_t subtype)
16303 {
16304         struct lpfc_queue *cq;
16305         struct lpfc_queue *eq;
16306         struct lpfc_mbx_cq_create_set *cq_set;
16307         struct lpfc_dmabuf *dmabuf;
16308         LPFC_MBOXQ_t *mbox;
16309         int rc, length, alloclen, status = 0;
16310         int cnt, idx, numcq, page_idx = 0;
16311         uint32_t shdr_status, shdr_add_status;
16312         union lpfc_sli4_cfg_shdr *shdr;
16313         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16314
16315         /* sanity check on queue memory */
16316         numcq = phba->cfg_nvmet_mrq;
16317         if (!cqp || !hdwq || !numcq)
16318                 return -ENODEV;
16319
16320         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16321         if (!mbox)
16322                 return -ENOMEM;
16323
16324         length = sizeof(struct lpfc_mbx_cq_create_set);
16325         length += ((numcq * cqp[0]->page_count) *
16326                    sizeof(struct dma_address));
16327         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16328                         LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16329                         LPFC_SLI4_MBX_NEMBED);
16330         if (alloclen < length) {
16331                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16332                                 "3098 Allocated DMA memory size (%d) is "
16333                                 "less than the requested DMA memory size "
16334                                 "(%d)\n", alloclen, length);
16335                 status = -ENOMEM;
16336                 goto out;
16337         }
16338         cq_set = mbox->sge_array->addr[0];
16339         shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16340         bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16341
16342         for (idx = 0; idx < numcq; idx++) {
16343                 cq = cqp[idx];
16344                 eq = hdwq[idx].hba_eq;
16345                 if (!cq || !eq) {
16346                         status = -ENOMEM;
16347                         goto out;
16348                 }
16349                 if (!phba->sli4_hba.pc_sli4_params.supported)
16350                         hw_page_size = cq->page_size;
16351
16352                 switch (idx) {
16353                 case 0:
16354                         bf_set(lpfc_mbx_cq_create_set_page_size,
16355                                &cq_set->u.request,
16356                                (hw_page_size / SLI4_PAGE_SIZE));
16357                         bf_set(lpfc_mbx_cq_create_set_num_pages,
16358                                &cq_set->u.request, cq->page_count);
16359                         bf_set(lpfc_mbx_cq_create_set_evt,
16360                                &cq_set->u.request, 1);
16361                         bf_set(lpfc_mbx_cq_create_set_valid,
16362                                &cq_set->u.request, 1);
16363                         bf_set(lpfc_mbx_cq_create_set_cqe_size,
16364                                &cq_set->u.request, 0);
16365                         bf_set(lpfc_mbx_cq_create_set_num_cq,
16366                                &cq_set->u.request, numcq);
16367                         bf_set(lpfc_mbx_cq_create_set_autovalid,
16368                                &cq_set->u.request,
16369                                phba->sli4_hba.pc_sli4_params.cqav);
16370                         switch (cq->entry_count) {
16371                         case 2048:
16372                         case 4096:
16373                                 if (phba->sli4_hba.pc_sli4_params.cqv ==
16374                                     LPFC_Q_CREATE_VERSION_2) {
16375                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16376                                                &cq_set->u.request,
16377                                                 cq->entry_count);
16378                                         bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16379                                                &cq_set->u.request,
16380                                                LPFC_CQ_CNT_WORD7);
16381                                         break;
16382                                 }
16383                                 fallthrough;
16384                         default:
16385                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16386                                                 "3118 Bad CQ count. (%d)\n",
16387                                                 cq->entry_count);
16388                                 if (cq->entry_count < 256) {
16389                                         status = -EINVAL;
16390                                         goto out;
16391                                 }
16392                                 fallthrough;    /* otherwise default to smallest */
16393                         case 256:
16394                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16395                                        &cq_set->u.request, LPFC_CQ_CNT_256);
16396                                 break;
16397                         case 512:
16398                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16399                                        &cq_set->u.request, LPFC_CQ_CNT_512);
16400                                 break;
16401                         case 1024:
16402                                 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16403                                        &cq_set->u.request, LPFC_CQ_CNT_1024);
16404                                 break;
16405                         }
16406                         bf_set(lpfc_mbx_cq_create_set_eq_id0,
16407                                &cq_set->u.request, eq->queue_id);
16408                         break;
16409                 case 1:
16410                         bf_set(lpfc_mbx_cq_create_set_eq_id1,
16411                                &cq_set->u.request, eq->queue_id);
16412                         break;
16413                 case 2:
16414                         bf_set(lpfc_mbx_cq_create_set_eq_id2,
16415                                &cq_set->u.request, eq->queue_id);
16416                         break;
16417                 case 3:
16418                         bf_set(lpfc_mbx_cq_create_set_eq_id3,
16419                                &cq_set->u.request, eq->queue_id);
16420                         break;
16421                 case 4:
16422                         bf_set(lpfc_mbx_cq_create_set_eq_id4,
16423                                &cq_set->u.request, eq->queue_id);
16424                         break;
16425                 case 5:
16426                         bf_set(lpfc_mbx_cq_create_set_eq_id5,
16427                                &cq_set->u.request, eq->queue_id);
16428                         break;
16429                 case 6:
16430                         bf_set(lpfc_mbx_cq_create_set_eq_id6,
16431                                &cq_set->u.request, eq->queue_id);
16432                         break;
16433                 case 7:
16434                         bf_set(lpfc_mbx_cq_create_set_eq_id7,
16435                                &cq_set->u.request, eq->queue_id);
16436                         break;
16437                 case 8:
16438                         bf_set(lpfc_mbx_cq_create_set_eq_id8,
16439                                &cq_set->u.request, eq->queue_id);
16440                         break;
16441                 case 9:
16442                         bf_set(lpfc_mbx_cq_create_set_eq_id9,
16443                                &cq_set->u.request, eq->queue_id);
16444                         break;
16445                 case 10:
16446                         bf_set(lpfc_mbx_cq_create_set_eq_id10,
16447                                &cq_set->u.request, eq->queue_id);
16448                         break;
16449                 case 11:
16450                         bf_set(lpfc_mbx_cq_create_set_eq_id11,
16451                                &cq_set->u.request, eq->queue_id);
16452                         break;
16453                 case 12:
16454                         bf_set(lpfc_mbx_cq_create_set_eq_id12,
16455                                &cq_set->u.request, eq->queue_id);
16456                         break;
16457                 case 13:
16458                         bf_set(lpfc_mbx_cq_create_set_eq_id13,
16459                                &cq_set->u.request, eq->queue_id);
16460                         break;
16461                 case 14:
16462                         bf_set(lpfc_mbx_cq_create_set_eq_id14,
16463                                &cq_set->u.request, eq->queue_id);
16464                         break;
16465                 case 15:
16466                         bf_set(lpfc_mbx_cq_create_set_eq_id15,
16467                                &cq_set->u.request, eq->queue_id);
16468                         break;
16469                 }
16470
16471                 /* link the cq onto the parent eq child list */
16472                 list_add_tail(&cq->list, &eq->child_list);
16473                 /* Set up completion queue's type and subtype */
16474                 cq->type = type;
16475                 cq->subtype = subtype;
16476                 cq->assoc_qid = eq->queue_id;
16477                 cq->assoc_qp = eq;
16478                 cq->host_index = 0;
16479                 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16480                 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16481                                          cq->entry_count);
16482                 cq->chann = idx;
16483
16484                 rc = 0;
16485                 list_for_each_entry(dmabuf, &cq->page_list, list) {
16486                         memset(dmabuf->virt, 0, hw_page_size);
16487                         cnt = page_idx + dmabuf->buffer_tag;
16488                         cq_set->u.request.page[cnt].addr_lo =
16489                                         putPaddrLow(dmabuf->phys);
16490                         cq_set->u.request.page[cnt].addr_hi =
16491                                         putPaddrHigh(dmabuf->phys);
16492                         rc++;
16493                 }
16494                 page_idx += rc;
16495         }
16496
16497         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16498
16499         /* The IOCTL status is embedded in the mailbox subheader. */
16500         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16501         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16502         if (shdr_status || shdr_add_status || rc) {
16503                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16504                                 "3119 CQ_CREATE_SET mailbox failed with "
16505                                 "status x%x add_status x%x, mbx status x%x\n",
16506                                 shdr_status, shdr_add_status, rc);
16507                 status = -ENXIO;
16508                 goto out;
16509         }
16510         rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16511         if (rc == 0xFFFF) {
16512                 status = -ENXIO;
16513                 goto out;
16514         }
16515
16516         for (idx = 0; idx < numcq; idx++) {
16517                 cq = cqp[idx];
16518                 cq->queue_id = rc + idx;
16519                 if (cq->queue_id > phba->sli4_hba.cq_max)
16520                         phba->sli4_hba.cq_max = cq->queue_id;
16521         }
16522
16523 out:
16524         lpfc_sli4_mbox_cmd_free(phba, mbox);
16525         return status;
16526 }
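
/*
 * Editor's note: illustrative sketch, not driver code.  CREATE_CQ_SET
 * returns one base queue id and the firmware numbers the set
 * consecutively, which is why the loop above assigns queue_id as
 * base + idx.  A hypothetical caller creates the whole NVMET MRQ CQ
 * set with a single mailbox command, assuming nvmet_cqset and hdwq
 * were allocated beforehand:
 */
static int __maybe_unused
lpfc_example_nvmet_cq_set_create(struct lpfc_hba *phba)
{
	/* One CQ per NVMET MRQ, each bound to the matching hdwq EQ */
	return lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
				  phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
}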
16527
16528 /**
16529  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16530  * @phba: HBA structure that indicates port to create a queue on.
16531  * @mq: The queue structure to use to create the mailbox queue.
16532  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16533  * @cq: The completion queue to associate with this mailbox queue.
16534  *
16535  * This function provides failback (fb) functionality when the
16536  * mq_create_ext fails on older FW generations.  It's purpose is identical
16537  * to mq_create_ext otherwise.
16538  *
16539  * This routine cannot fail as all attributes were previously accessed and
16540  * initialized in mq_create_ext.
16541  **/
16542 static void
16543 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16544                        LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16545 {
16546         struct lpfc_mbx_mq_create *mq_create;
16547         struct lpfc_dmabuf *dmabuf;
16548         int length;
16549
16550         length = (sizeof(struct lpfc_mbx_mq_create) -
16551                   sizeof(struct lpfc_sli4_cfg_mhdr));
16552         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16553                          LPFC_MBOX_OPCODE_MQ_CREATE,
16554                          length, LPFC_SLI4_MBX_EMBED);
16555         mq_create = &mbox->u.mqe.un.mq_create;
16556         bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16557                mq->page_count);
16558         bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16559                cq->queue_id);
16560         bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16561         switch (mq->entry_count) {
16562         case 16:
16563                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16564                        LPFC_MQ_RING_SIZE_16);
16565                 break;
16566         case 32:
16567                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16568                        LPFC_MQ_RING_SIZE_32);
16569                 break;
16570         case 64:
16571                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16572                        LPFC_MQ_RING_SIZE_64);
16573                 break;
16574         case 128:
16575                 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16576                        LPFC_MQ_RING_SIZE_128);
16577                 break;
16578         }
16579         list_for_each_entry(dmabuf, &mq->page_list, list) {
16580                 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16581                         putPaddrLow(dmabuf->phys);
16582                 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16583                         putPaddrHigh(dmabuf->phys);
16584         }
16585 }
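
/*
 * Editor's note: illustrative sketch, not driver code.  MQ ring sizes
 * are powers of two from 16 to 128; the switch above maps the entry
 * count onto the opaque LPFC_MQ_RING_SIZE_* encodings from lpfc_hw4.h.
 * A hypothetical helper expressing the same mapping:
 */
static uint32_t __maybe_unused
lpfc_example_mq_ring_code(uint32_t entry_count)
{
	switch (entry_count) {
	case 16:
		return LPFC_MQ_RING_SIZE_16;
	case 32:
		return LPFC_MQ_RING_SIZE_32;
	case 64:
		return LPFC_MQ_RING_SIZE_64;
	default:	/* 128 is the largest supported MQ ring */
		return LPFC_MQ_RING_SIZE_128;
	}
}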
16586
16587 /**
16588  * lpfc_mq_create - Create a Mailbox Queue on the HBA
16589  * @phba: HBA structure that indicates port to create a queue on.
16590  * @mq: The queue structure to use to create the mailbox queue.
16591  * @cq: The completion queue to associate with this mailbox queue.
16592  * @subtype: The queue's subtype.
16593  *
16594  * This function creates a mailbox queue, as detailed in @mq, on a port,
16595  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16596  *
16597  * The @phba struct is used to send a mailbox command to the HBA. The @mq
16598  * struct is used to get the entry count and entry size that are necessary
16599  * to determine the number of pages to allocate and use for this queue. This
16600  * function sends the MQ_CREATE_EXT mailbox command (with a failback to plain
16601  * MQ_CREATE on older firmware) to the HBA in polled mode, so it is synchronous
16602  * and waits for the mailbox command to finish before returning.
16603  *
16604  * On success this function returns zero. If unable to allocate enough
16605  * memory this function returns -ENOMEM. If the queue create mailbox command
16606  * fails this function returns -ENXIO.
16607  **/
16608 int32_t
16609 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16610                struct lpfc_queue *cq, uint32_t subtype)
16611 {
16612         struct lpfc_mbx_mq_create *mq_create;
16613         struct lpfc_mbx_mq_create_ext *mq_create_ext;
16614         struct lpfc_dmabuf *dmabuf;
16615         LPFC_MBOXQ_t *mbox;
16616         int rc, length, status = 0;
16617         uint32_t shdr_status, shdr_add_status;
16618         union lpfc_sli4_cfg_shdr *shdr;
16619         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16620
16621         /* sanity check on queue memory */
16622         if (!mq || !cq)
16623                 return -ENODEV;
16624         if (!phba->sli4_hba.pc_sli4_params.supported)
16625                 hw_page_size = SLI4_PAGE_SIZE;
16626
16627         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16628         if (!mbox)
16629                 return -ENOMEM;
16630         length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16631                   sizeof(struct lpfc_sli4_cfg_mhdr));
16632         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16633                          LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16634                          length, LPFC_SLI4_MBX_EMBED);
16635
16636         mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16637         shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16638         bf_set(lpfc_mbx_mq_create_ext_num_pages,
16639                &mq_create_ext->u.request, mq->page_count);
16640         bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16641                &mq_create_ext->u.request, 1);
16642         bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16643                &mq_create_ext->u.request, 1);
16644         bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16645                &mq_create_ext->u.request, 1);
16646         bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16647                &mq_create_ext->u.request, 1);
16648         bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16649                &mq_create_ext->u.request, 1);
16650         bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16651         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16652                phba->sli4_hba.pc_sli4_params.mqv);
16653         if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16654                 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16655                        cq->queue_id);
16656         else
16657                 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16658                        cq->queue_id);
16659         switch (mq->entry_count) {
16660         default:
16661                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16662                                 "0362 Unsupported MQ count. (%d)\n",
16663                                 mq->entry_count);
16664                 if (mq->entry_count < 16) {
16665                         status = -EINVAL;
16666                         goto out;
16667                 }
16668                 fallthrough;    /* otherwise default to smallest count */
16669         case 16:
16670                 bf_set(lpfc_mq_context_ring_size,
16671                        &mq_create_ext->u.request.context,
16672                        LPFC_MQ_RING_SIZE_16);
16673                 break;
16674         case 32:
16675                 bf_set(lpfc_mq_context_ring_size,
16676                        &mq_create_ext->u.request.context,
16677                        LPFC_MQ_RING_SIZE_32);
16678                 break;
16679         case 64:
16680                 bf_set(lpfc_mq_context_ring_size,
16681                        &mq_create_ext->u.request.context,
16682                        LPFC_MQ_RING_SIZE_64);
16683                 break;
16684         case 128:
16685                 bf_set(lpfc_mq_context_ring_size,
16686                        &mq_create_ext->u.request.context,
16687                        LPFC_MQ_RING_SIZE_128);
16688                 break;
16689         }
16690         list_for_each_entry(dmabuf, &mq->page_list, list) {
16691                 memset(dmabuf->virt, 0, hw_page_size);
16692                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16693                                         putPaddrLow(dmabuf->phys);
16694                 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16695                                         putPaddrHigh(dmabuf->phys);
16696         }
16697         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16698         mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16699                               &mq_create_ext->u.response);
16700         if (rc != MBX_SUCCESS) {
16701                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16702                                 "2795 MQ_CREATE_EXT failed with "
16703                                 "status x%x. Failback to MQ_CREATE.\n",
16704                                 rc);
16705                 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16706                 mq_create = &mbox->u.mqe.un.mq_create;
16707                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16708                 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16709                 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16710                                       &mq_create->u.response);
16711         }
16712
16713         /* The IOCTL status is embedded in the mailbox subheader. */
16714         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16715         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16716         if (shdr_status || shdr_add_status || rc) {
16717                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16718                                 "2502 MQ_CREATE mailbox failed with "
16719                                 "status x%x add_status x%x, mbx status x%x\n",
16720                                 shdr_status, shdr_add_status, rc);
16721                 status = -ENXIO;
16722                 goto out;
16723         }
16724         if (mq->queue_id == 0xFFFF) {
16725                 status = -ENXIO;
16726                 goto out;
16727         }
16728         mq->type = LPFC_MQ;
16729         mq->assoc_qid = cq->queue_id;
16730         mq->subtype = subtype;
16731         mq->host_index = 0;
16732         mq->hba_index = 0;
16733
16734         /* link the mq onto the parent cq child list */
16735         list_add_tail(&mq->list, &cq->child_list);
16736 out:
16737         mempool_free(mbox, phba->mbox_mem_pool);
16738         return status;
16739 }
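
/*
 * Editor's note: illustrative sketch, not driver code.  Each SLI4
 * function has a single mailbox queue bound to the mailbox CQ; if the
 * firmware rejects MQ_CREATE_EXT, the failback path above reissues a
 * plain MQ_CREATE and only the asynchronous event registrations are
 * lost.  The helper name is hypothetical:
 */
static int __maybe_unused
lpfc_example_mbox_mq_create(struct lpfc_hba *phba)
{
	return lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
			      phba->sli4_hba.mbx_cq, LPFC_MBOX);
}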
16740
16741 /**
16742  * lpfc_wq_create - Create a Work Queue on the HBA
16743  * @phba: HBA structure that indicates port to create a queue on.
16744  * @wq: The queue structure to use to create the work queue.
16745  * @cq: The completion queue to bind this work queue to.
16746  * @subtype: The subtype of the work queue indicating its functionality.
16747  *
16748  * This function creates a work queue, as detailed in @wq, on a port, described
16749  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16750  *
16751  * The @phba struct is used to send a mailbox command to the HBA. The @wq
16752  * struct is used to get the entry count and entry size that are necessary
16753  * to determine the number of pages to allocate and use for this queue. The
16754  * @cq is used to indicate which completion queue to bind this work queue to.
16755  * This function sends the WQ_CREATE mailbox command to the HBA in polled
16756  * mode, so it is synchronous and waits for the mailbox command to finish
16757  * before returning.
16758  *
16759  * On success this function returns zero. If unable to allocate enough
16760  * memory this function returns -ENOMEM. If the queue create mailbox command
16761  * fails this function returns -ENXIO.
16762  **/
16763 int
16764 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16765                struct lpfc_queue *cq, uint32_t subtype)
16766 {
16767         struct lpfc_mbx_wq_create *wq_create;
16768         struct lpfc_dmabuf *dmabuf;
16769         LPFC_MBOXQ_t *mbox;
16770         int rc, length, status = 0;
16771         uint32_t shdr_status, shdr_add_status;
16772         union lpfc_sli4_cfg_shdr *shdr;
16773         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16774         struct dma_address *page;
16775         void __iomem *bar_memmap_p;
16776         uint32_t db_offset;
16777         uint16_t pci_barset;
16778         uint8_t dpp_barset;
16779         uint32_t dpp_offset;
16780         uint8_t wq_create_version;
16781 #ifdef CONFIG_X86
16782         unsigned long pg_addr;
16783 #endif
16784
16785         /* sanity check on queue memory */
16786         if (!wq || !cq)
16787                 return -ENODEV;
16788         if (!phba->sli4_hba.pc_sli4_params.supported)
16789                 hw_page_size = wq->page_size;
16790
16791         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16792         if (!mbox)
16793                 return -ENOMEM;
16794         length = (sizeof(struct lpfc_mbx_wq_create) -
16795                   sizeof(struct lpfc_sli4_cfg_mhdr));
16796         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16797                          LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16798                          length, LPFC_SLI4_MBX_EMBED);
16799         wq_create = &mbox->u.mqe.un.wq_create;
16800         shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16801         bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16802                     wq->page_count);
16803         bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16804                     cq->queue_id);
16805
16806         /* wqv is the earliest version supported, NOT the latest */
16807         bf_set(lpfc_mbox_hdr_version, &shdr->request,
16808                phba->sli4_hba.pc_sli4_params.wqv);
16809
16810         if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16811             (wq->page_size > SLI4_PAGE_SIZE))
16812                 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16813         else
16814                 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16815
16816         switch (wq_create_version) {
16817         case LPFC_Q_CREATE_VERSION_1:
16818                 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16819                        wq->entry_count);
16820                 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16821                        LPFC_Q_CREATE_VERSION_1);
16822
16823                 switch (wq->entry_size) {
16824                 default:
16825                 case 64:
16826                         bf_set(lpfc_mbx_wq_create_wqe_size,
16827                                &wq_create->u.request_1,
16828                                LPFC_WQ_WQE_SIZE_64);
16829                         break;
16830                 case 128:
16831                         bf_set(lpfc_mbx_wq_create_wqe_size,
16832                                &wq_create->u.request_1,
16833                                LPFC_WQ_WQE_SIZE_128);
16834                         break;
16835                 }
16836                 /* Request DPP by default */
16837                 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16838                 bf_set(lpfc_mbx_wq_create_page_size,
16839                        &wq_create->u.request_1,
16840                        (wq->page_size / SLI4_PAGE_SIZE));
16841                 page = wq_create->u.request_1.page;
16842                 break;
16843         default:
16844                 page = wq_create->u.request.page;
16845                 break;
16846         }
16847
16848         list_for_each_entry(dmabuf, &wq->page_list, list) {
16849                 memset(dmabuf->virt, 0, hw_page_size);
16850                 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16851                 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16852         }
16853
16854         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16855                 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16856
16857         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16858         /* The IOCTL status is embedded in the mailbox subheader. */
16859         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16860         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16861         if (shdr_status || shdr_add_status || rc) {
16862                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16863                                 "2503 WQ_CREATE mailbox failed with "
16864                                 "status x%x add_status x%x, mbx status x%x\n",
16865                                 shdr_status, shdr_add_status, rc);
16866                 status = -ENXIO;
16867                 goto out;
16868         }
16869
16870         if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16871                 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16872                                         &wq_create->u.response);
16873         else
16874                 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16875                                         &wq_create->u.response_1);
16876
16877         if (wq->queue_id == 0xFFFF) {
16878                 status = -ENXIO;
16879                 goto out;
16880         }
16881
16882         wq->db_format = LPFC_DB_LIST_FORMAT;
16883         if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16884                 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16885                         wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16886                                                &wq_create->u.response);
16887                         if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16888                             (wq->db_format != LPFC_DB_RING_FORMAT)) {
16889                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16890                                                 "3265 WQ[%d] doorbell format "
16891                                                 "not supported: x%x\n",
16892                                                 wq->queue_id, wq->db_format);
16893                                 status = -EINVAL;
16894                                 goto out;
16895                         }
16896                         pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16897                                             &wq_create->u.response);
16898                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16899                                                                    pci_barset);
16900                         if (!bar_memmap_p) {
16901                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16902                                                 "3263 WQ[%d] failed to memmap "
16903                                                 "pci barset:x%x\n",
16904                                                 wq->queue_id, pci_barset);
16905                                 status = -ENOMEM;
16906                                 goto out;
16907                         }
16908                         db_offset = wq_create->u.response.doorbell_offset;
16909                         if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16910                             (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16911                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16912                                                 "3252 WQ[%d] doorbell offset "
16913                                                 "not supported: x%x\n",
16914                                                 wq->queue_id, db_offset);
16915                                 status = -EINVAL;
16916                                 goto out;
16917                         }
16918                         wq->db_regaddr = bar_memmap_p + db_offset;
16919                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16920                                         "3264 WQ[%d]: barset:x%x, offset:x%x, "
16921                                         "format:x%x\n", wq->queue_id,
16922                                         pci_barset, db_offset, wq->db_format);
16923                 } else
16924                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16925         } else {
16926                 /* Check if DPP was honored by the firmware */
16927                 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16928                                     &wq_create->u.response_1);
16929                 if (wq->dpp_enable) {
16930                         pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16931                                             &wq_create->u.response_1);
16932                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16933                                                                    pci_barset);
16934                         if (!bar_memmap_p) {
16935                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16936                                                 "3267 WQ[%d] failed to memmap "
16937                                                 "pci barset:x%x\n",
16938                                                 wq->queue_id, pci_barset);
16939                                 status = -ENOMEM;
16940                                 goto out;
16941                         }
16942                         db_offset = wq_create->u.response_1.doorbell_offset;
16943                         wq->db_regaddr = bar_memmap_p + db_offset;
16944                         wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16945                                             &wq_create->u.response_1);
16946                         dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16947                                             &wq_create->u.response_1);
16948                         bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16949                                                                    dpp_barset);
16950                         if (!bar_memmap_p) {
16951                                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16952                                                 "3268 WQ[%d] failed to memmap "
16953                                                 "pci barset:x%x\n",
16954                                                 wq->queue_id, dpp_barset);
16955                                 status = -ENOMEM;
16956                                 goto out;
16957                         }
16958                         dpp_offset = wq_create->u.response_1.dpp_offset;
16959                         wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16960                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16961                                         "3271 WQ[%d]: barset:x%x, offset:x%x, "
16962                                         "dpp_id:x%x dpp_barset:x%x "
16963                                         "dpp_offset:x%x\n",
16964                                         wq->queue_id, pci_barset, db_offset,
16965                                         wq->dpp_id, dpp_barset, dpp_offset);
16966
16967 #ifdef CONFIG_X86
16968                         /* Enable combined writes for DPP aperture */
16969                         pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16970                         rc = set_memory_wc(pg_addr, 1);
16971                         if (rc) {
16972                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16973                                         "3272 Cannot setup Combined "
16974                                         "Write on WQ[%d] - disable DPP\n",
16975                                         wq->queue_id);
16976                                 phba->cfg_enable_dpp = 0;
16977                         }
16978 #else
16979                         phba->cfg_enable_dpp = 0;
16980 #endif
16981                 } else
16982                         wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16983         }
16984         wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16985         if (wq->pring == NULL) {
16986                 status = -ENOMEM;
16987                 goto out;
16988         }
16989         wq->type = LPFC_WQ;
16990         wq->assoc_qid = cq->queue_id;
16991         wq->subtype = subtype;
16992         wq->host_index = 0;
16993         wq->hba_index = 0;
16994         wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16995
16996         /* link the wq onto the parent cq child list */
16997         list_add_tail(&wq->list, &cq->child_list);
16998 out:
16999         mempool_free(mbox, phba->mbox_mem_pool);
17000         return status;
17001 }
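
/*
 * Editor's note: illustrative sketch, not driver code.  The slow-path
 * ELS work queue is created against the ELS CQ; on DPP-capable
 * adapters the routine above additionally maps the doorbell and DPP
 * apertures before the WQ is usable.  The helper name is hypothetical:
 */
static int __maybe_unused
lpfc_example_els_wq_create(struct lpfc_hba *phba)
{
	return lpfc_wq_create(phba, phba->sli4_hba.els_wq,
			      phba->sli4_hba.els_cq, LPFC_ELS);
}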
17002
17003 /**
17004  * lpfc_rq_create - Create a Receive Queue on the HBA
17005  * @phba: HBA structure that indicates port to create a queue on.
17006  * @hrq: The queue structure to use to create the header receive queue.
17007  * @drq: The queue structure to use to create the data receive queue.
17008  * @cq: The completion queue to bind these receive queues to.
17009  * @subtype: The subtype of the receive queues indicating their functionality.
17010  *
17011  * This function creates a receive buffer queue pair, as detailed in @hrq and
17012  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
17013  * to the HBA.
17014  *
17015  * The @phba struct is used to send a mailbox command to the HBA. The @hrq and
17016  * @drq structs are used to get the entry count that is necessary to determine
17017  * the number of pages to use for each queue. The @cq indicates which
17018  * completion queue receives the completions for buffers posted to these queues.
17019  * This function sends the RQ_CREATE mailbox command to the HBA in polled mode,
17020  * so it is synchronous and waits for the mailbox command to finish before
17021  * returning.
17022  *
17023  * On success this function returns zero. If unable to allocate enough
17024  * memory this function returns -ENOMEM. If the queue create mailbox command
17025  * fails this function returns -ENXIO.
17026  **/
17027 int
17028 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17029                struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17030 {
17031         struct lpfc_mbx_rq_create *rq_create;
17032         struct lpfc_dmabuf *dmabuf;
17033         LPFC_MBOXQ_t *mbox;
17034         int rc, length, status = 0;
17035         uint32_t shdr_status, shdr_add_status;
17036         union lpfc_sli4_cfg_shdr *shdr;
17037         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17038         void __iomem *bar_memmap_p;
17039         uint32_t db_offset;
17040         uint16_t pci_barset;
17041
17042         /* sanity check on queue memory */
17043         if (!hrq || !drq || !cq)
17044                 return -ENODEV;
17045         if (!phba->sli4_hba.pc_sli4_params.supported)
17046                 hw_page_size = SLI4_PAGE_SIZE;
17047
17048         if (hrq->entry_count != drq->entry_count)
17049                 return -EINVAL;
17050         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17051         if (!mbox)
17052                 return -ENOMEM;
17053         length = (sizeof(struct lpfc_mbx_rq_create) -
17054                   sizeof(struct lpfc_sli4_cfg_mhdr));
17055         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17056                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17057                          length, LPFC_SLI4_MBX_EMBED);
17058         rq_create = &mbox->u.mqe.un.rq_create;
17059         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17060         bf_set(lpfc_mbox_hdr_version, &shdr->request,
17061                phba->sli4_hba.pc_sli4_params.rqv);
17062         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17063                 bf_set(lpfc_rq_context_rqe_count_1,
17064                        &rq_create->u.request.context,
17065                        hrq->entry_count);
17066                 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17067                 bf_set(lpfc_rq_context_rqe_size,
17068                        &rq_create->u.request.context,
17069                        LPFC_RQE_SIZE_8);
17070                 bf_set(lpfc_rq_context_page_size,
17071                        &rq_create->u.request.context,
17072                        LPFC_RQ_PAGE_SIZE_4096);
17073         } else {
17074                 switch (hrq->entry_count) {
17075                 default:
17076                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17077                                         "2535 Unsupported RQ count. (%d)\n",
17078                                         hrq->entry_count);
17079                         if (hrq->entry_count < 512) {
17080                                 status = -EINVAL;
17081                                 goto out;
17082                         }
17083                         fallthrough;    /* otherwise default to smallest count */
17084                 case 512:
17085                         bf_set(lpfc_rq_context_rqe_count,
17086                                &rq_create->u.request.context,
17087                                LPFC_RQ_RING_SIZE_512);
17088                         break;
17089                 case 1024:
17090                         bf_set(lpfc_rq_context_rqe_count,
17091                                &rq_create->u.request.context,
17092                                LPFC_RQ_RING_SIZE_1024);
17093                         break;
17094                 case 2048:
17095                         bf_set(lpfc_rq_context_rqe_count,
17096                                &rq_create->u.request.context,
17097                                LPFC_RQ_RING_SIZE_2048);
17098                         break;
17099                 case 4096:
17100                         bf_set(lpfc_rq_context_rqe_count,
17101                                &rq_create->u.request.context,
17102                                LPFC_RQ_RING_SIZE_4096);
17103                         break;
17104                 }
17105                 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17106                        LPFC_HDR_BUF_SIZE);
17107         }
17108         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17109                cq->queue_id);
17110         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17111                hrq->page_count);
17112         list_for_each_entry(dmabuf, &hrq->page_list, list) {
17113                 memset(dmabuf->virt, 0, hw_page_size);
17114                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17115                                         putPaddrLow(dmabuf->phys);
17116                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17117                                         putPaddrHigh(dmabuf->phys);
17118         }
17119         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17120                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17121
17122         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17123         /* The IOCTL status is embedded in the mailbox subheader. */
17124         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17125         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17126         if (shdr_status || shdr_add_status || rc) {
17127                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17128                                 "2504 RQ_CREATE mailbox failed with "
17129                                 "status x%x add_status x%x, mbx status x%x\n",
17130                                 shdr_status, shdr_add_status, rc);
17131                 status = -ENXIO;
17132                 goto out;
17133         }
17134         hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17135         if (hrq->queue_id == 0xFFFF) {
17136                 status = -ENXIO;
17137                 goto out;
17138         }
17139
17140         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17141                 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17142                                         &rq_create->u.response);
17143                 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17144                     (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17145                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17146                                         "3262 RQ [%d] doorbell format not "
17147                                         "supported: x%x\n", hrq->queue_id,
17148                                         hrq->db_format);
17149                         status = -EINVAL;
17150                         goto out;
17151                 }
17152
17153                 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17154                                     &rq_create->u.response);
17155                 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17156                 if (!bar_memmap_p) {
17157                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17158                                         "3269 RQ[%d] failed to memmap pci "
17159                                         "barset:x%x\n", hrq->queue_id,
17160                                         pci_barset);
17161                         status = -ENOMEM;
17162                         goto out;
17163                 }
17164
17165                 db_offset = rq_create->u.response.doorbell_offset;
17166                 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17167                     (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17168                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17169                                         "3270 RQ[%d] doorbell offset not "
17170                                         "supported: x%x\n", hrq->queue_id,
17171                                         db_offset);
17172                         status = -EINVAL;
17173                         goto out;
17174                 }
17175                 hrq->db_regaddr = bar_memmap_p + db_offset;
17176                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17177                                 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17178                                 "format:x%x\n", hrq->queue_id, pci_barset,
17179                                 db_offset, hrq->db_format);
17180         } else {
17181                 hrq->db_format = LPFC_DB_RING_FORMAT;
17182                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17183         }
17184         hrq->type = LPFC_HRQ;
17185         hrq->assoc_qid = cq->queue_id;
17186         hrq->subtype = subtype;
17187         hrq->host_index = 0;
17188         hrq->hba_index = 0;
17189         hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17190
17191         /* now create the data queue */
17192         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17193                          LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17194                          length, LPFC_SLI4_MBX_EMBED);
17195         bf_set(lpfc_mbox_hdr_version, &shdr->request,
17196                phba->sli4_hba.pc_sli4_params.rqv);
17197         if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17198                 bf_set(lpfc_rq_context_rqe_count_1,
17199                        &rq_create->u.request.context, hrq->entry_count);
17200                 if (subtype == LPFC_NVMET)
17201                         rq_create->u.request.context.buffer_size =
17202                                 LPFC_NVMET_DATA_BUF_SIZE;
17203                 else
17204                         rq_create->u.request.context.buffer_size =
17205                                 LPFC_DATA_BUF_SIZE;
17206                 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17207                        LPFC_RQE_SIZE_8);
17208                 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17209                        (PAGE_SIZE/SLI4_PAGE_SIZE));
17210         } else {
17211                 switch (drq->entry_count) {
17212                 default:
17213                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17214                                         "2536 Unsupported RQ count. (%d)\n",
17215                                         drq->entry_count);
17216                         if (drq->entry_count < 512) {
17217                                 status = -EINVAL;
17218                                 goto out;
17219                         }
17220                         fallthrough;    /* otherwise default to smallest count */
17221                 case 512:
17222                         bf_set(lpfc_rq_context_rqe_count,
17223                                &rq_create->u.request.context,
17224                                LPFC_RQ_RING_SIZE_512);
17225                         break;
17226                 case 1024:
17227                         bf_set(lpfc_rq_context_rqe_count,
17228                                &rq_create->u.request.context,
17229                                LPFC_RQ_RING_SIZE_1024);
17230                         break;
17231                 case 2048:
17232                         bf_set(lpfc_rq_context_rqe_count,
17233                                &rq_create->u.request.context,
17234                                LPFC_RQ_RING_SIZE_2048);
17235                         break;
17236                 case 4096:
17237                         bf_set(lpfc_rq_context_rqe_count,
17238                                &rq_create->u.request.context,
17239                                LPFC_RQ_RING_SIZE_4096);
17240                         break;
17241                 }
17242                 if (subtype == LPFC_NVMET)
17243                         bf_set(lpfc_rq_context_buf_size,
17244                                &rq_create->u.request.context,
17245                                LPFC_NVMET_DATA_BUF_SIZE);
17246                 else
17247                         bf_set(lpfc_rq_context_buf_size,
17248                                &rq_create->u.request.context,
17249                                LPFC_DATA_BUF_SIZE);
17250         }
17251         bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17252                cq->queue_id);
17253         bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17254                drq->page_count);
17255         list_for_each_entry(dmabuf, &drq->page_list, list) {
17256                 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17257                                         putPaddrLow(dmabuf->phys);
17258                 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17259                                         putPaddrHigh(dmabuf->phys);
17260         }
17261         if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17262                 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17263         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17264         /* The IOCTL status is embedded in the mailbox subheader. */
17265         shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17266         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17267         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17268         if (shdr_status || shdr_add_status || rc) {
17269                 status = -ENXIO;
17270                 goto out;
17271         }
17272         drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17273         if (drq->queue_id == 0xFFFF) {
17274                 status = -ENXIO;
17275                 goto out;
17276         }
17277         drq->type = LPFC_DRQ;
17278         drq->assoc_qid = cq->queue_id;
17279         drq->subtype = subtype;
17280         drq->host_index = 0;
17281         drq->hba_index = 0;
17282         drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17283
17284         /* link the header and data RQs onto the parent cq child list */
17285         list_add_tail(&hrq->list, &cq->child_list);
17286         list_add_tail(&drq->list, &cq->child_list);
17287
17288 out:
17289         mempool_free(mbox, phba->mbox_mem_pool);
17290         return status;
17291 }
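
/*
 * Editor's note: illustrative sketch, not driver code.  Header and
 * data RQs are always created as a pair with matching entry counts;
 * unsolicited ELS/CT frames land on this pair and complete on the ELS
 * CQ.  The helper name is hypothetical:
 */
static int __maybe_unused
lpfc_example_els_rq_create(struct lpfc_hba *phba)
{
	return lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
			      phba->sli4_hba.dat_rq,
			      phba->sli4_hba.els_cq, LPFC_USOL);
}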
17292
17293 /**
17294  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17295  * @phba: HBA structure that indicates port to create a queue on.
17296  * @hrqp: The queue structure array to use to create the header receive queues.
17297  * @drqp: The queue structure array to use to create the data receive queues.
17298  * @cqp: The completion queue array to bind these receive queues to.
17299  * @subtype: Functional purpose of the queues (MBOX, IO, ELS, NVMET, etc.).
17300  *
17301  * This function creates a set of receive buffer queue pairs, as detailed in
17302  * @hrqp and @drqp, on a port described by @phba, by sending a version 2
17303  * RQ_CREATE mailbox command to the HBA.
17304  *
17305  * The @phba struct is used to send a mailbox command to the HBA. The @hrqp and
17306  * @drqp arrays are used to get the entry counts that are necessary to determine
17307  * the number of pages to use for each queue. The @cqp array indicates which
17308  * completion queue receives the completions for buffers posted to each pair.
17309  * This function sends the RQ_CREATE mailbox command to the HBA in polled mode,
17310  * so it is synchronous and waits for the mailbox command to finish before
17311  * returning.
17312  *
17313  * On success this function returns zero. If unable to allocate enough
17314  * memory this function returns -ENOMEM. If the queue create mailbox command
17315  * fails this function returns -ENXIO.
17316  **/
17317 int
17318 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17319                 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17320                 uint32_t subtype)
17321 {
17322         struct lpfc_queue *hrq, *drq, *cq;
17323         struct lpfc_mbx_rq_create_v2 *rq_create;
17324         struct lpfc_dmabuf *dmabuf;
17325         LPFC_MBOXQ_t *mbox;
17326         int rc, length, alloclen, status = 0;
17327         int cnt, idx, numrq, page_idx = 0;
17328         uint32_t shdr_status, shdr_add_status;
17329         union lpfc_sli4_cfg_shdr *shdr;
17330         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17331
17332         numrq = phba->cfg_nvmet_mrq;
17333         /* sanity check on array memory */
17334         if (!hrqp || !drqp || !cqp || !numrq)
17335                 return -ENODEV;
17336         if (!phba->sli4_hba.pc_sli4_params.supported)
17337                 hw_page_size = SLI4_PAGE_SIZE;
17338
17339         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17340         if (!mbox)
17341                 return -ENOMEM;
17342
17343         length = sizeof(struct lpfc_mbx_rq_create_v2);
17344         length += ((2 * numrq * hrqp[0]->page_count) *
17345                    sizeof(struct dma_address));
17346
17347         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17348                                     LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17349                                     LPFC_SLI4_MBX_NEMBED);
17350         if (alloclen < length) {
17351                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17352                                 "3099 Allocated DMA memory size (%d) is "
17353                                 "less than the requested DMA memory size "
17354                                 "(%d)\n", alloclen, length);
17355                 status = -ENOMEM;
17356                 goto out;
17357         }
17358
17361         rq_create = mbox->sge_array->addr[0];
17362         shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17363
17364         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17365         cnt = 0;
17366
17367         for (idx = 0; idx < numrq; idx++) {
17368                 hrq = hrqp[idx];
17369                 drq = drqp[idx];
17370                 cq  = cqp[idx];
17371
17372                 /* sanity check on queue memory */
17373                 if (!hrq || !drq || !cq) {
17374                         status = -ENODEV;
17375                         goto out;
17376                 }
17377
17378                 if (hrq->entry_count != drq->entry_count) {
17379                         status = -EINVAL;
17380                         goto out;
17381                 }
17382
17383                 if (idx == 0) {
17384                         bf_set(lpfc_mbx_rq_create_num_pages,
17385                                &rq_create->u.request,
17386                                hrq->page_count);
17387                         bf_set(lpfc_mbx_rq_create_rq_cnt,
17388                                &rq_create->u.request, (numrq * 2));
17389                         bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17390                                1);
17391                         bf_set(lpfc_rq_context_base_cq,
17392                                &rq_create->u.request.context,
17393                                cq->queue_id);
17394                         bf_set(lpfc_rq_context_data_size,
17395                                &rq_create->u.request.context,
17396                                LPFC_NVMET_DATA_BUF_SIZE);
17397                         bf_set(lpfc_rq_context_hdr_size,
17398                                &rq_create->u.request.context,
17399                                LPFC_HDR_BUF_SIZE);
17400                         bf_set(lpfc_rq_context_rqe_count_1,
17401                                &rq_create->u.request.context,
17402                                hrq->entry_count);
17403                         bf_set(lpfc_rq_context_rqe_size,
17404                                &rq_create->u.request.context,
17405                                LPFC_RQE_SIZE_8);
17406                         bf_set(lpfc_rq_context_page_size,
17407                                &rq_create->u.request.context,
17408                                (PAGE_SIZE/SLI4_PAGE_SIZE));
17409                 }
17410                 rc = 0;
17411                 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17412                         memset(dmabuf->virt, 0, hw_page_size);
17413                         cnt = page_idx + dmabuf->buffer_tag;
17414                         rq_create->u.request.page[cnt].addr_lo =
17415                                         putPaddrLow(dmabuf->phys);
17416                         rq_create->u.request.page[cnt].addr_hi =
17417                                         putPaddrHigh(dmabuf->phys);
17418                         rc++;
17419                 }
17420                 page_idx += rc;
17421
17422                 rc = 0;
17423                 list_for_each_entry(dmabuf, &drq->page_list, list) {
17424                         memset(dmabuf->virt, 0, hw_page_size);
17425                         cnt = page_idx + dmabuf->buffer_tag;
17426                         rq_create->u.request.page[cnt].addr_lo =
17427                                         putPaddrLow(dmabuf->phys);
17428                         rq_create->u.request.page[cnt].addr_hi =
17429                                         putPaddrHigh(dmabuf->phys);
17430                         rc++;
17431                 }
17432                 page_idx += rc;
17433
17434                 hrq->db_format = LPFC_DB_RING_FORMAT;
17435                 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17436                 hrq->type = LPFC_HRQ;
17437                 hrq->assoc_qid = cq->queue_id;
17438                 hrq->subtype = subtype;
17439                 hrq->host_index = 0;
17440                 hrq->hba_index = 0;
17441                 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17442
17443                 drq->db_format = LPFC_DB_RING_FORMAT;
17444                 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17445                 drq->type = LPFC_DRQ;
17446                 drq->assoc_qid = cq->queue_id;
17447                 drq->subtype = subtype;
17448                 drq->host_index = 0;
17449                 drq->hba_index = 0;
17450                 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17451
17452                 list_add_tail(&hrq->list, &cq->child_list);
17453                 list_add_tail(&drq->list, &cq->child_list);
17454         }
17455
17456         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17457         /* The IOCTL status is embedded in the mailbox subheader. */
17458         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17459         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17460         if (shdr_status || shdr_add_status || rc) {
17461                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17462                                 "3120 RQ_CREATE mailbox failed with "
17463                                 "status x%x add_status x%x, mbx status x%x\n",
17464                                 shdr_status, shdr_add_status, rc);
17465                 status = -ENXIO;
17466                 goto out;
17467         }
17468         rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17469         if (rc == 0xFFFF) {
17470                 status = -ENXIO;
17471                 goto out;
17472         }
17473
17474         /* Initialize all RQs with associated queue id */
17475         for (idx = 0; idx < numrq; idx++) {
17476                 hrq = hrqp[idx];
17477                 hrq->queue_id = rc + (2 * idx);
17478                 drq = drqp[idx];
17479                 drq->queue_id = rc + (2 * idx) + 1;
17480         }
17481
17482 out:
17483         lpfc_sli4_mbox_cmd_free(phba, mbox);
17484         return status;
17485 }
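
/*
 * Illustrative sketch only (compiled out): one plausible way a caller could
 * drive lpfc_mrq_create() for NVMET, assuming the queue arrays have already
 * been allocated in phba->sli4_hba by the setup path.
 * lpfc_example_mrq_setup is a hypothetical name, not a driver API.
 */
#if 0
static int lpfc_example_mrq_setup(struct lpfc_hba *phba)
{
	/* One hdr/data RQ pair per MRQ, each bound to an NVMET CQ */
	return lpfc_mrq_create(phba,
			       phba->sli4_hba.nvmet_mrq_hdr,
			       phba->sli4_hba.nvmet_mrq_data,
			       phba->sli4_hba.nvmet_cqset,
			       LPFC_NVMET);
}
#endif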
17486
17487 /**
17488  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17489  * @phba: HBA structure that indicates port to destroy a queue on.
17490  * @eq: The queue structure associated with the queue to destroy.
17491  *
17492  * This function destroys a queue, as detailed in @eq by sending an mailbox
17493  * command, specific to the type of queue, to the HBA.
17494  *
17495  * The @eq struct is used to get the queue ID of the queue to destroy.
17496  *
17497  * On success this function will return a zero. If the queue destroy mailbox
17498  * command fails this function will return -ENXIO.
17499  **/
17500 int
17501 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17502 {
17503         LPFC_MBOXQ_t *mbox;
17504         int rc, length, status = 0;
17505         uint32_t shdr_status, shdr_add_status;
17506         union lpfc_sli4_cfg_shdr *shdr;
17507
17508         /* sanity check on queue memory */
17509         if (!eq)
17510                 return -ENODEV;
17511
17512         mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17513         if (!mbox)
17514                 return -ENOMEM;
17515         length = (sizeof(struct lpfc_mbx_eq_destroy) -
17516                   sizeof(struct lpfc_sli4_cfg_mhdr));
17517         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17518                          LPFC_MBOX_OPCODE_EQ_DESTROY,
17519                          length, LPFC_SLI4_MBX_EMBED);
17520         bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17521                eq->queue_id);
17522         mbox->vport = eq->phba->pport;
17523         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17524
17525         rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17526         /* The IOCTL status is embedded in the mailbox subheader. */
17527         shdr = (union lpfc_sli4_cfg_shdr *)
17528                 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17529         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17530         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17531         if (shdr_status || shdr_add_status || rc) {
17532                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17533                                 "2505 EQ_DESTROY mailbox failed with "
17534                                 "status x%x add_status x%x, mbx status x%x\n",
17535                                 shdr_status, shdr_add_status, rc);
17536                 status = -ENXIO;
17537         }
17538
17539         /* Remove eq from any list */
17540         list_del_init(&eq->list);
17541         mempool_free(mbox, eq->phba->mbox_mem_pool);
17542         return status;
17543 }
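
/*
 * Illustrative sketch only (compiled out): every *_destroy routine in this
 * block follows the same shape -- embedded destroy mailbox, polled issue,
 * cfg_shdr status check. A hypothetical teardown of one EQ and its child
 * CQs, assuming no I/O is in flight, might look like this:
 */
#if 0
static void lpfc_example_eq_teardown(struct lpfc_hba *phba,
				     struct lpfc_queue *eq)
{
	struct lpfc_queue *cq, *next;

	/* Children must go first; each destroy also unlinks the queue */
	list_for_each_entry_safe(cq, next, &eq->child_list, list)
		lpfc_cq_destroy(phba, cq);
	lpfc_eq_destroy(phba, eq);
}
#endif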
17544
17545 /**
17546  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17547  * @phba: HBA structure that indicates port to destroy a queue on.
17548  * @cq: The queue structure associated with the queue to destroy.
17549  *
17550  * This function destroys a queue, as detailed in @cq, by sending a mailbox
17551  * command, specific to the type of queue, to the HBA.
17552  *
17553  * The @cq struct is used to get the queue ID of the queue to destroy.
17554  *
17555  * On success this function will return a zero. If the queue destroy mailbox
17556  * command fails this function will return -ENXIO.
17557  **/
17558 int
17559 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17560 {
17561         LPFC_MBOXQ_t *mbox;
17562         int rc, length, status = 0;
17563         uint32_t shdr_status, shdr_add_status;
17564         union lpfc_sli4_cfg_shdr *shdr;
17565
17566         /* sanity check on queue memory */
17567         if (!cq)
17568                 return -ENODEV;
17569         mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17570         if (!mbox)
17571                 return -ENOMEM;
17572         length = (sizeof(struct lpfc_mbx_cq_destroy) -
17573                   sizeof(struct lpfc_sli4_cfg_mhdr));
17574         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17575                          LPFC_MBOX_OPCODE_CQ_DESTROY,
17576                          length, LPFC_SLI4_MBX_EMBED);
17577         bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17578                cq->queue_id);
17579         mbox->vport = cq->phba->pport;
17580         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17581         rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17582         /* The IOCTL status is embedded in the mailbox subheader. */
17583         shdr = (union lpfc_sli4_cfg_shdr *)
17584                 &mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17585         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17586         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17587         if (shdr_status || shdr_add_status || rc) {
17588                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17589                                 "2506 CQ_DESTROY mailbox failed with "
17590                                 "status x%x add_status x%x, mbx status x%x\n",
17591                                 shdr_status, shdr_add_status, rc);
17592                 status = -ENXIO;
17593         }
17594         /* Remove cq from any list */
17595         list_del_init(&cq->list);
17596         mempool_free(mbox, cq->phba->mbox_mem_pool);
17597         return status;
17598 }
17599
17600 /**
17601  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17602  * @phba: HBA structure that indicates port to destroy a queue on.
17603  * @mq: The queue structure associated with the queue to destroy.
17604  *
17605  * This function destroys a queue, as detailed in @mq, by sending a mailbox
17606  * command, specific to the type of queue, to the HBA.
17607  *
17608  * The @mq struct is used to get the queue ID of the queue to destroy.
17609  *
17610  * On success this function will return a zero. If the queue destroy mailbox
17611  * command fails this function will return -ENXIO.
17612  **/
17613 int
17614 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17615 {
17616         LPFC_MBOXQ_t *mbox;
17617         int rc, length, status = 0;
17618         uint32_t shdr_status, shdr_add_status;
17619         union lpfc_sli4_cfg_shdr *shdr;
17620
17621         /* sanity check on queue memory */
17622         if (!mq)
17623                 return -ENODEV;
17624         mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17625         if (!mbox)
17626                 return -ENOMEM;
17627         length = (sizeof(struct lpfc_mbx_mq_destroy) -
17628                   sizeof(struct lpfc_sli4_cfg_mhdr));
17629         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17630                          LPFC_MBOX_OPCODE_MQ_DESTROY,
17631                          length, LPFC_SLI4_MBX_EMBED);
17632         bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17633                mq->queue_id);
17634         mbox->vport = mq->phba->pport;
17635         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17636         rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17637         /* The IOCTL status is embedded in the mailbox subheader. */
17638         shdr = (union lpfc_sli4_cfg_shdr *)
17639                 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17640         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17641         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17642         if (shdr_status || shdr_add_status || rc) {
17643                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17644                                 "2507 MQ_DESTROY mailbox failed with "
17645                                 "status x%x add_status x%x, mbx status x%x\n",
17646                                 shdr_status, shdr_add_status, rc);
17647                 status = -ENXIO;
17648         }
17649         /* Remove mq from any list */
17650         list_del_init(&mq->list);
17651         mempool_free(mbox, mq->phba->mbox_mem_pool);
17652         return status;
17653 }
17654
17655 /**
17656  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17657  * @phba: HBA structure that indicates port to destroy a queue on.
17658  * @wq: The queue structure associated with the queue to destroy.
17659  *
17660  * This function destroys a queue, as detailed in @wq, by sending a mailbox
17661  * command, specific to the type of queue, to the HBA.
17662  *
17663  * The @wq struct is used to get the queue ID of the queue to destroy.
17664  *
17665  * On success this function will return a zero. If the queue destroy mailbox
17666  * command fails this function will return -ENXIO.
17667  **/
17668 int
17669 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17670 {
17671         LPFC_MBOXQ_t *mbox;
17672         int rc, length, status = 0;
17673         uint32_t shdr_status, shdr_add_status;
17674         union lpfc_sli4_cfg_shdr *shdr;
17675
17676         /* sanity check on queue memory */
17677         if (!wq)
17678                 return -ENODEV;
17679         mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17680         if (!mbox)
17681                 return -ENOMEM;
17682         length = (sizeof(struct lpfc_mbx_wq_destroy) -
17683                   sizeof(struct lpfc_sli4_cfg_mhdr));
17684         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17685                          LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17686                          length, LPFC_SLI4_MBX_EMBED);
17687         bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17688                wq->queue_id);
17689         mbox->vport = wq->phba->pport;
17690         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17691         rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17692         shdr = (union lpfc_sli4_cfg_shdr *)
17693                 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17694         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17695         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17696         if (shdr_status || shdr_add_status || rc) {
17697                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17698                                 "2508 WQ_DESTROY mailbox failed with "
17699                                 "status x%x add_status x%x, mbx status x%x\n",
17700                                 shdr_status, shdr_add_status, rc);
17701                 status = -ENXIO;
17702         }
17703         /* Remove wq from any list */
17704         list_del_init(&wq->list);
17705         kfree(wq->pring);
17706         wq->pring = NULL;
17707         mempool_free(mbox, wq->phba->mbox_mem_pool);
17708         return status;
17709 }
17710
17711 /**
17712  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17713  * @phba: HBA structure that indicates port to destroy a queue on.
17714  * @hrq: The queue structure associated with the queue to destroy.
17715  * @drq: The queue structure associated with the queue to destroy.
17716  *
17717  * This function destroys the receive queue pair, as detailed in @hrq and
17718  * @drq, by sending a mailbox command, specific to the type of queue, to the HBA.
17719  *
17720  * The @hrq and @drq structs are used to get the queue IDs of the queues to destroy.
17721  *
17722  * On success this function will return a zero. If the queue destroy mailbox
17723  * command fails this function will return -ENXIO.
17724  **/
17725 int
17726 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17727                 struct lpfc_queue *drq)
17728 {
17729         LPFC_MBOXQ_t *mbox;
17730         int rc, length, status = 0;
17731         uint32_t shdr_status, shdr_add_status;
17732         union lpfc_sli4_cfg_shdr *shdr;
17733
17734         /* sanity check on queue memory */
17735         if (!hrq || !drq)
17736                 return -ENODEV;
17737         mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17738         if (!mbox)
17739                 return -ENOMEM;
17740         length = (sizeof(struct lpfc_mbx_rq_destroy) -
17741                   sizeof(struct lpfc_sli4_cfg_mhdr));
17742         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17743                          LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17744                          length, LPFC_SLI4_MBX_EMBED);
17745         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17746                hrq->queue_id);
17747         mbox->vport = hrq->phba->pport;
17748         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17749         rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17750         /* The IOCTL status is embedded in the mailbox subheader. */
17751         shdr = (union lpfc_sli4_cfg_shdr *)
17752                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17753         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17754         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17755         if (shdr_status || shdr_add_status || rc) {
17756                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17757                                 "2509 RQ_DESTROY mailbox failed with "
17758                                 "status x%x add_status x%x, mbx status x%x\n",
17759                                 shdr_status, shdr_add_status, rc);
17760                 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17761                 return -ENXIO;
17762         }
17763         bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17764                drq->queue_id);
17765         rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17766         shdr = (union lpfc_sli4_cfg_shdr *)
17767                 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17768         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17769         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17770         if (shdr_status || shdr_add_status || rc) {
17771                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17772                                 "2510 RQ_DESTROY mailbox failed with "
17773                                 "status x%x add_status x%x, mbx status x%x\n",
17774                                 shdr_status, shdr_add_status, rc);
17775                 status = -ENXIO;
17776         }
17777         list_del_init(&hrq->list);
17778         list_del_init(&drq->list);
17779         mempool_free(mbox, hrq->phba->mbox_mem_pool);
17780         return status;
17781 }
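
/*
 * Illustrative sketch only (compiled out): the header and data RQs are
 * destroyed as a pair through one reused mailbox. Assuming the default ELS
 * receive queues in phba->sli4_hba, a caller might simply do:
 */
#if 0
	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
#endif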
17782
17783 /**
17784  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17785  * @phba: pointer to lpfc hba data structure.
17786  * @pdma_phys_addr0: Physical address of the 1st SGL page.
17787  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17788  * @xritag: the xritag that ties this io to the SGL pages.
17789  *
17790  * This routine will post the sgl pages for the IO that has the xritag
17791  * that is in the iocbq structure. The xritag is assigned during iocbq
17792  * creation and persists for as long as the driver is loaded.
17793  * If the caller has fewer than 256 scatter gather segments to map, then
17794  * pdma_phys_addr1 should be 0.
17795  * If the caller needs to map more than 256 scatter gather segments, then
17796  * pdma_phys_addr1 should be a valid physical address.
17797  * Physical addresses for SGLs must be 64 byte aligned.
17798  * If two SGLs are mapped, the first one must have 256 entries and the
17799  * second can have between 1 and 256 entries.
17800  *
17801  * Return codes:
17802  *      0 - Success
17803  *      -EINVAL, -ENXIO, -ENOMEM - Failure
17804  **/
17805 int
17806 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17807                 dma_addr_t pdma_phys_addr0,
17808                 dma_addr_t pdma_phys_addr1,
17809                 uint16_t xritag)
17810 {
17811         struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17812         LPFC_MBOXQ_t *mbox;
17813         int rc;
17814         uint32_t shdr_status, shdr_add_status;
17815         uint32_t mbox_tmo;
17816         union lpfc_sli4_cfg_shdr *shdr;
17817
17818         if (xritag == NO_XRI) {
17819                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17820                                 "0364 Invalid param:\n");
17821                 return -EINVAL;
17822         }
17823
17824         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17825         if (!mbox)
17826                 return -ENOMEM;
17827
17828         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17829                         LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17830                         sizeof(struct lpfc_mbx_post_sgl_pages) -
17831                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17832
17833         post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17834                                 &mbox->u.mqe.un.post_sgl_pages;
17835         bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17836         bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17837
17838         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17839                                 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17840         post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17841                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17842
17843         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17844                                 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17845         post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17846                                 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17847         if (!phba->sli4_hba.intr_enable)
17848                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17849         else {
17850                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17851                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17852         }
17853         /* The IOCTL status is embedded in the mailbox subheader. */
17854         shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17855         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17856         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17857         if (!phba->sli4_hba.intr_enable)
17858                 mempool_free(mbox, phba->mbox_mem_pool);
17859         else if (rc != MBX_TIMEOUT)
17860                 mempool_free(mbox, phba->mbox_mem_pool);
17861         if (shdr_status || shdr_add_status || rc) {
17862                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17863                                 "2511 POST_SGL mailbox failed with "
17864                                 "status x%x add_status x%x, mbx status x%x\n",
17865                                 shdr_status, shdr_add_status, rc);
17866                 return -ENXIO;
17867         }
17868         return 0;
17869 }
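
/*
 * Illustrative sketch only (compiled out): posting a single-page SGL for an
 * ELS sglq. Per the kernel-doc above, the second page address is 0 when 256
 * or fewer segments are mapped. lpfc_example_post_one_sgl is a hypothetical
 * helper name.
 */
#if 0
static int lpfc_example_post_one_sgl(struct lpfc_hba *phba,
				     struct lpfc_sglq *sglq)
{
	/* sglq->phys must be 64-byte aligned */
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}
#endif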
17869
17870 /**
17871  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17872  * @phba: pointer to lpfc hba data structure.
17873  *
17874  * This routine is invoked to allocate the next available xri from the
17875  * driver's xri bitmask, consistent with the SLI-4 interface spec. The
17876  * search is logical, so the driver starts at bit 0 on each call and
17877  * marks the first free entry as in use.
17878  *
17879  * Returns
17880  *      An available xri in the range 0 <= xri < max_xri if successful
17881  *      NO_XRI if no xris are available.
17882  **/
17883 static uint16_t
17884 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17885 {
17886         unsigned long xri;
17887
17888         /*
17889          * Fetch the next logical xri.  Because this index is logical,
17890          * the driver starts at 0 each time.
17891          */
17892         spin_lock_irq(&phba->hbalock);
17893         xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17894                                  phba->sli4_hba.max_cfg_param.max_xri);
17895         if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17896                 spin_unlock_irq(&phba->hbalock);
17897                 return NO_XRI;
17898         } else {
17899                 set_bit(xri, phba->sli4_hba.xri_bmask);
17900                 phba->sli4_hba.max_cfg_param.xri_used++;
17901         }
17902         spin_unlock_irq(&phba->hbalock);
17903         return xri;
17904 }
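
/*
 * Illustrative sketch only (compiled out): the alloc/free pair below is a
 * plain bitmap allocator round trip under hbalock. NO_XRI is the sentinel
 * for exhaustion, never a valid tag.
 */
#if 0
	uint16_t xri = lpfc_sli4_alloc_xri(phba);

	if (xri != NO_XRI) {
		/* ... tie the xri to an io and issue it ... */
		lpfc_sli4_free_xri(phba, xri);
	}
#endif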
17905
17906 /**
17907  * __lpfc_sli4_free_xri - Release an xri for reuse.
17908  * @phba: pointer to lpfc hba data structure.
17909  * @xri: xri to release.
17910  *
17911  * This routine is invoked to release an xri to the pool of
17912  * available xris maintained by the driver.
17913  **/
17914 static void
17915 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17916 {
17917         if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17918                 phba->sli4_hba.max_cfg_param.xri_used--;
17919         }
17920 }
17921
17922 /**
17923  * lpfc_sli4_free_xri - Release an xri for reuse.
17924  * @phba: pointer to lpfc hba data structure.
17925  * @xri: xri to release.
17926  *
17927  * This routine is invoked to release an xri to the pool of
17928  * available xris maintained by the driver.
17929  **/
17930 void
17931 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17932 {
17933         spin_lock_irq(&phba->hbalock);
17934         __lpfc_sli4_free_xri(phba, xri);
17935         spin_unlock_irq(&phba->hbalock);
17936 }
17937
17938 /**
17939  * lpfc_sli4_next_xritag - Get an xritag for the io
17940  * @phba: Pointer to HBA context object.
17941  *
17942  * This function gets an xritag for the iocb. If there is no unused xritag
17943  * it will return NO_XRI (0xFFFF) and log a warning; NO_XRI is not a valid
17944  * xritag.
17945  * The function returns the allocated xritag if successful.
17946  * The caller is not required to hold any lock.
17947  **/
17948 uint16_t
17949 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17950 {
17951         uint16_t xri_index;
17952
17953         xri_index = lpfc_sli4_alloc_xri(phba);
17954         if (xri_index == NO_XRI)
17955                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17956                                 "2004 Failed to allocate XRI. Last XRITAG is %d"
17957                                 " Max XRI is %d, Used XRI is %d\n",
17958                                 xri_index,
17959                                 phba->sli4_hba.max_cfg_param.max_xri,
17960                                 phba->sli4_hba.max_cfg_param.xri_used);
17961         return xri_index;
17962 }
17963
17964 /**
17965  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
17966  * @phba: pointer to lpfc hba data structure.
17967  * @post_sgl_list: pointer to els sgl entry list.
17968  * @post_cnt: number of els sgl entries on the list.
17969  *
17970  * This routine is invoked to post a block of the driver's sgl pages to the
17971  * HBA using a non-embedded mailbox command. No lock is held. This routine
17972  * is only called when the driver is loading and after all IO has been
17973  * stopped.
17974  **/
17975 static int
17976 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17977                             struct list_head *post_sgl_list,
17978                             int post_cnt)
17979 {
17980         struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17981         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17982         struct sgl_page_pairs *sgl_pg_pairs;
17983         void *viraddr;
17984         LPFC_MBOXQ_t *mbox;
17985         uint32_t reqlen, alloclen, pg_pairs;
17986         uint32_t mbox_tmo;
17987         uint16_t xritag_start = 0;
17988         int rc = 0;
17989         uint32_t shdr_status, shdr_add_status;
17990         union lpfc_sli4_cfg_shdr *shdr;
17991
17992         reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17993                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17994         if (reqlen > SLI4_PAGE_SIZE) {
17995                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17996                                 "2559 Block sgl registration required DMA "
17997                                 "size (%d) greater than a page\n", reqlen);
17998                 return -ENOMEM;
17999         }
18000
18001         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18002         if (!mbox)
18003                 return -ENOMEM;
18004
18005         /* Allocate DMA memory and set up the non-embedded mailbox command */
18006         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18007                          LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18008                          LPFC_SLI4_MBX_NEMBED);
18009
18010         if (alloclen < reqlen) {
18011                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18012                                 "0285 Allocated DMA memory size (%d) is "
18013                                 "less than the requested DMA memory "
18014                                 "size (%d)\n", alloclen, reqlen);
18015                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18016                 return -ENOMEM;
18017         }
18018         /* Set up the SGL pages in the non-embedded DMA pages */
18019         viraddr = mbox->sge_array->addr[0];
18020         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18021         sgl_pg_pairs = &sgl->sgl_pg_pairs;
18022
18023         pg_pairs = 0;
18024         list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18025                 /* Set up the sge entry */
18026                 sgl_pg_pairs->sgl_pg0_addr_lo =
18027                                 cpu_to_le32(putPaddrLow(sglq_entry->phys));
18028                 sgl_pg_pairs->sgl_pg0_addr_hi =
18029                                 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18030                 sgl_pg_pairs->sgl_pg1_addr_lo =
18031                                 cpu_to_le32(putPaddrLow(0));
18032                 sgl_pg_pairs->sgl_pg1_addr_hi =
18033                                 cpu_to_le32(putPaddrHigh(0));
18034
18035                 /* Keep the first xritag on the list */
18036                 if (pg_pairs == 0)
18037                         xritag_start = sglq_entry->sli4_xritag;
18038                 sgl_pg_pairs++;
18039                 pg_pairs++;
18040         }
18041
18042         /* Complete initialization and perform endian conversion. */
18043         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18044         bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18045         sgl->word0 = cpu_to_le32(sgl->word0);
18046
18047         if (!phba->sli4_hba.intr_enable)
18048                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18049         else {
18050                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18051                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18052         }
18053         shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18054         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18055         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18056         if (!phba->sli4_hba.intr_enable)
18057                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18058         else if (rc != MBX_TIMEOUT)
18059                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18060         if (shdr_status || shdr_add_status || rc) {
18061                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18062                                 "2513 POST_SGL_BLOCK mailbox command failed "
18063                                 "status x%x add_status x%x mbx status x%x\n",
18064                                 shdr_status, shdr_add_status, rc);
18065                 rc = -ENXIO;
18066         }
18067         return rc;
18068 }
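
/*
 * Illustrative note: the non-embedded request sizing above is just the
 * page-pair array plus the shared config header. For example, posting
 * 10 sgls needs
 *
 *	reqlen = 10 * sizeof(struct sgl_page_pairs) +
 *		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 *
 * and any reqlen above SLI4_PAGE_SIZE is rejected with -ENOMEM.
 */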
18069
18070 /**
18071  * lpfc_sli4_post_io_sgl_block - post a block of nvme buffer sgls to firmware
18072  * @phba: pointer to lpfc hba data structure.
18073  * @nblist: pointer to nvme buffer list.
18074  * @count: number of nvme buffers on the list.
18075  *
18076  * This routine is invoked to post a block of @count nvme sgl pages from the
18077  * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
18078  * No lock is held.
18079  *
18080  **/
18081 static int
18082 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18083                             int count)
18084 {
18085         struct lpfc_io_buf *lpfc_ncmd;
18086         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18087         struct sgl_page_pairs *sgl_pg_pairs;
18088         void *viraddr;
18089         LPFC_MBOXQ_t *mbox;
18090         uint32_t reqlen, alloclen, pg_pairs;
18091         uint32_t mbox_tmo;
18092         uint16_t xritag_start = 0;
18093         int rc = 0;
18094         uint32_t shdr_status, shdr_add_status;
18095         dma_addr_t pdma_phys_bpl1;
18096         union lpfc_sli4_cfg_shdr *shdr;
18097
18098         /* Calculate the requested length of the dma memory */
18099         reqlen = count * sizeof(struct sgl_page_pairs) +
18100                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18101         if (reqlen > SLI4_PAGE_SIZE) {
18102                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18103                                 "6118 Block sgl registration required DMA "
18104                                 "size (%d) greater than a page\n", reqlen);
18105                 return -ENOMEM;
18106         }
18107         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18108         if (!mbox) {
18109                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18110                                 "6119 Failed to allocate mbox cmd memory\n");
18111                 return -ENOMEM;
18112         }
18113
18114         /* Allocate DMA memory and set up the non-embedded mailbox command */
18115         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18116                                     LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18117                                     reqlen, LPFC_SLI4_MBX_NEMBED);
18118
18119         if (alloclen < reqlen) {
18120                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18121                                 "6120 Allocated DMA memory size (%d) is "
18122                                 "less than the requested DMA memory "
18123                                 "size (%d)\n", alloclen, reqlen);
18124                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18125                 return -ENOMEM;
18126         }
18127
18128         /* Get the first SGE entry from the non-embedded DMA memory */
18129         viraddr = mbox->sge_array->addr[0];
18130
18131         /* Set up the SGL pages in the non-embedded DMA pages */
18132         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18133         sgl_pg_pairs = &sgl->sgl_pg_pairs;
18134
18135         pg_pairs = 0;
18136         list_for_each_entry(lpfc_ncmd, nblist, list) {
18137                 /* Set up the sge entry */
18138                 sgl_pg_pairs->sgl_pg0_addr_lo =
18139                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18140                 sgl_pg_pairs->sgl_pg0_addr_hi =
18141                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18142                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18143                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18144                                                 SGL_PAGE_SIZE;
18145                 else
18146                         pdma_phys_bpl1 = 0;
18147                 sgl_pg_pairs->sgl_pg1_addr_lo =
18148                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18149                 sgl_pg_pairs->sgl_pg1_addr_hi =
18150                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18151                 /* Keep the first xritag on the list */
18152                 if (pg_pairs == 0)
18153                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18154                 sgl_pg_pairs++;
18155                 pg_pairs++;
18156         }
18157         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18158         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18159         /* Perform endian conversion if necessary */
18160         sgl->word0 = cpu_to_le32(sgl->word0);
18161
18162         if (!phba->sli4_hba.intr_enable) {
18163                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18164         } else {
18165                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18166                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18167         }
18168         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18169         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18170         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18171         if (!phba->sli4_hba.intr_enable)
18172                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18173         else if (rc != MBX_TIMEOUT)
18174                 lpfc_sli4_mbox_cmd_free(phba, mbox);
18175         if (shdr_status || shdr_add_status || rc) {
18176                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18177                                 "6125 POST_SGL_BLOCK mailbox command failed "
18178                                 "status x%x add_status x%x mbx status x%x\n",
18179                                 shdr_status, shdr_add_status, rc);
18180                 rc = -ENXIO;
18181         }
18182         return rc;
18183 }
18184
18185 /**
18186  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18187  * @phba: pointer to lpfc hba data structure.
18188  * @post_nblist: pointer to the nvme buffer list.
18189  * @sb_count: number of nvme buffers.
18190  *
18191  * This routine walks the list of nvme buffers that was passed in. It attempts
18192  * to construct blocks of nvme buffer sgls that contain contiguous xris and
18193  * uses the non-embedded SGL block post mailbox command to post them to the
18194  * port. A single NVME buffer sgl with a non-contiguous xri, if any, is posted
18195  * with the embedded SGL post mailbox command instead. The @post_nblist passed
18196  * in must be a local list, so no lock is needed when manipulating it.
18197  *
18198  * Returns: the number of buffers successfully posted, or 0 on failure.
18199  **/
18200 int
18201 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18202                            struct list_head *post_nblist, int sb_count)
18203 {
18204         struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18205         int status, sgl_size;
18206         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18207         dma_addr_t pdma_phys_sgl1;
18208         int last_xritag = NO_XRI;
18209         int cur_xritag;
18210         LIST_HEAD(prep_nblist);
18211         LIST_HEAD(blck_nblist);
18212         LIST_HEAD(nvme_nblist);
18213
18214         /* sanity check */
18215         if (sb_count <= 0)
18216                 return -EINVAL;
18217
18218         sgl_size = phba->cfg_sg_dma_buf_size;
18219         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18220                 list_del_init(&lpfc_ncmd->list);
18221                 block_cnt++;
18222                 if ((last_xritag != NO_XRI) &&
18223                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18224                         /* a hole in xri block, form a sgl posting block */
18225                         list_splice_init(&prep_nblist, &blck_nblist);
18226                         post_cnt = block_cnt - 1;
18227                         /* prepare list for next posting block */
18228                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18229                         block_cnt = 1;
18230                 } else {
18231                         /* prepare list for next posting block */
18232                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18233                         /* enough sgls for non-embed sgl mbox command */
18234                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18235                                 list_splice_init(&prep_nblist, &blck_nblist);
18236                                 post_cnt = block_cnt;
18237                                 block_cnt = 0;
18238                         }
18239                 }
18240                 num_posting++;
18241                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18242
18243                 /* end of repost sgl list condition for NVME buffers */
18244                 if (num_posting == sb_count) {
18245                         if (post_cnt == 0) {
18246                                 /* last sgl posting block */
18247                                 list_splice_init(&prep_nblist, &blck_nblist);
18248                                 post_cnt = block_cnt;
18249                         } else if (block_cnt == 1) {
18250                                 /* last single sgl with non-contiguous xri */
18251                                 if (sgl_size > SGL_PAGE_SIZE)
18252                                         pdma_phys_sgl1 =
18253                                                 lpfc_ncmd->dma_phys_sgl +
18254                                                 SGL_PAGE_SIZE;
18255                                 else
18256                                         pdma_phys_sgl1 = 0;
18257                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18258                                 status = lpfc_sli4_post_sgl(
18259                                                 phba, lpfc_ncmd->dma_phys_sgl,
18260                                                 pdma_phys_sgl1, cur_xritag);
18261                                 if (status) {
18262                                         /* Post error.  Buffer unavailable. */
18263                                         lpfc_ncmd->flags |=
18264                                                 LPFC_SBUF_NOT_POSTED;
18265                                 } else {
18266                                         /* Post success. Buffer available. */
18267                                         lpfc_ncmd->flags &=
18268                                                 ~LPFC_SBUF_NOT_POSTED;
18269                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
18270                                         num_posted++;
18271                                 }
18272                                 /* put on NVME buffer sgl list either way */
18273                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18274                         }
18275                 }
18276
18277                 /* continue until a non-embedded page worth of sgls */
18278                 if (post_cnt == 0)
18279                         continue;
18280
18281                 /* post block of NVME buffer list sgls */
18282                 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18283                                                      post_cnt);
18284
18285                 /* don't reset xritag due to hole in xri block */
18286                 if (block_cnt == 0)
18287                         last_xritag = NO_XRI;
18288
18289                 /* reset NVME buffer post count for next round of posting */
18290                 post_cnt = 0;
18291
18292                 /* put the block's NVME buffers on the NVME buffer sgl list */
18293                 while (!list_empty(&blck_nblist)) {
18294                         list_remove_head(&blck_nblist, lpfc_ncmd,
18295                                          struct lpfc_io_buf, list);
18296                         if (status) {
18297                                 /* Post error.  Mark buffer unavailable. */
18298                                 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18299                         } else {
18300                                 /* Post success, Mark buffer available. */
18301                                 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18302                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
18303                                 num_posted++;
18304                         }
18305                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18306                 }
18307         }
18308         /* Push NVME buffers with sgl posted to the available list */
18309         lpfc_io_buf_replenish(phba, &nvme_nblist);
18310
18311         return num_posted;
18312 }
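
/*
 * Illustrative sketch only (compiled out): the batching rule above, reduced
 * to its core. Consecutive xritags extend the current block; a gap flushes
 * it, so every block post covers a contiguous xri range.
 * lpfc_example_xri_breaks_block is a hypothetical name.
 */
#if 0
static bool lpfc_example_xri_breaks_block(int last_xritag, int cur_xritag)
{
	return last_xritag != NO_XRI && cur_xritag != last_xritag + 1;
}
#endif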
18313
18314 /**
18315  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18316  * @phba: pointer to lpfc_hba struct that the frame was received on
18317  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18318  *
18319  * This function checks the fields in the @fc_hdr to see if the FC frame is a
18320  * valid type of frame that the LPFC driver will handle. This function will
18321  * return a zero if the frame is a valid frame or a non zero value when the
18322  * frame does not pass the check.
18323  **/
18324 static int
18325 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18326 {
18328         struct fc_vft_header *fc_vft_hdr;
18329         uint32_t *header = (uint32_t *) fc_hdr;
18330
18331 #define FC_RCTL_MDS_DIAGS       0xF4
18332
18333         switch (fc_hdr->fh_r_ctl) {
18334         case FC_RCTL_DD_UNCAT:          /* uncategorized information */
18335         case FC_RCTL_DD_SOL_DATA:       /* solicited data */
18336         case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
18337         case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
18338         case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
18339         case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
18340         case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
18341         case FC_RCTL_DD_CMD_STATUS:     /* command status */
18342         case FC_RCTL_ELS_REQ:   /* extended link services request */
18343         case FC_RCTL_ELS_REP:   /* extended link services reply */
18344         case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
18345         case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
18346         case FC_RCTL_BA_ABTS:   /* basic link service abort */
18347         case FC_RCTL_BA_RMC:    /* remove connection */
18348         case FC_RCTL_BA_ACC:    /* basic accept */
18349         case FC_RCTL_BA_RJT:    /* basic reject */
18350         case FC_RCTL_BA_PRMT:
18351         case FC_RCTL_ACK_1:     /* acknowledge_1 */
18352         case FC_RCTL_ACK_0:     /* acknowledge_0 */
18353         case FC_RCTL_P_RJT:     /* port reject */
18354         case FC_RCTL_F_RJT:     /* fabric reject */
18355         case FC_RCTL_P_BSY:     /* port busy */
18356         case FC_RCTL_F_BSY:     /* fabric busy to data frame */
18357         case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
18358         case FC_RCTL_LCR:       /* link credit reset */
18359         case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18360         case FC_RCTL_END:       /* end */
18361                 break;
18362         case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
18363                 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18364                 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18365                 return lpfc_fc_frame_check(phba, fc_hdr);
18366         case FC_RCTL_BA_NOP:    /* basic link service NOP */
18367         default:
18368                 goto drop;
18369         }
18370
18371         switch (fc_hdr->fh_type) {
18372         case FC_TYPE_BLS:
18373         case FC_TYPE_ELS:
18374         case FC_TYPE_FCP:
18375         case FC_TYPE_CT:
18376         case FC_TYPE_NVME:
18377                 break;
18378         case FC_TYPE_IP:
18379         case FC_TYPE_ILS:
18380         default:
18381                 goto drop;
18382         }
18383
18384         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18385                         "2538 Received frame rctl:x%x, type:x%x, "
18386                         "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18387                         fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18388                         be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18389                         be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18390                         be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18391                         be32_to_cpu(header[6]));
18392         return 0;
18393 drop:
18394         lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18395                         "2539 Dropped frame rctl:x%x type:x%x\n",
18396                         fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18397         return 1;
18398 }
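
/*
 * Illustrative sketch only (compiled out): callers hand this routine the
 * raw big-endian header from the hbq header buffer, exactly as it arrived,
 * and drop the frame on a non-zero return:
 */
#if 0
	struct fc_frame_header *fc_hdr = dmabuf->hbuf.virt;

	if (lpfc_fc_frame_check(phba, fc_hdr))
		return;	/* already logged as a dropped frame */
#endif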
18399
18400 /**
18401  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18402  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18403  *
18404  * This function processes the FC header to retrieve the VFI from the VF
18405  * header, if one exists. This function will return the VFI if one exists
18406  * or 0 if no VSAN Header exists.
18407  **/
18408 static uint32_t
18409 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18410 {
18411         struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18412
18413         if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18414                 return 0;
18415         return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18416 }
18417
18418 /**
18419  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18420  * @phba: Pointer to the HBA structure to search for the vport on
18421  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18422  * @fcfi: The FC Fabric ID that the frame came from
18423  * @did: Destination ID to match against
18424  *
18425  * This function searches the @phba for a vport that matches the content of the
18426  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18427  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18428  * returns the matching vport pointer or NULL if unable to match frame to a
18429  * vport.
18430  **/
18431 static struct lpfc_vport *
18432 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18433                        uint16_t fcfi, uint32_t did)
18434 {
18435         struct lpfc_vport **vports;
18436         struct lpfc_vport *vport = NULL;
18437         int i;
18438
18439         if (did == Fabric_DID)
18440                 return phba->pport;
18441         if ((phba->pport->fc_flag & FC_PT2PT) &&
18442             phba->link_state != LPFC_HBA_READY)
18443                 return phba->pport;
18444
18445         vports = lpfc_create_vport_work_array(phba);
18446         if (vports != NULL) {
18447                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18448                         if (phba->fcf.fcfi == fcfi &&
18449                             vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18450                             vports[i]->fc_myDID == did) {
18451                                 vport = vports[i];
18452                                 break;
18453                         }
18454                 }
18455         }
18456         lpfc_destroy_vport_work_array(phba, vports);
18457         return vport;
18458 }
18459
18460 /**
18461  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18462  * @vport: The vport to work on.
18463  *
18464  * This function updates the receive sequence time stamp for this vport. The
18465  * receive sequence time stamp indicates the time that the last frame of
18466  * the sequence that has been idle for the longest amount of time was received.
18467  * The driver uses this time stamp to determine if any received sequences have
18468  * timed out.
18469  **/
18470 static void
18471 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18472 {
18473         struct lpfc_dmabuf *h_buf;
18474         struct hbq_dmabuf *dmabuf = NULL;
18475
18476         /* get the oldest sequence on the rcv list */
18477         h_buf = list_get_first(&vport->rcv_buffer_list,
18478                                struct lpfc_dmabuf, list);
18479         if (!h_buf)
18480                 return;
18481         dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18482         vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18483 }
18484
18485 /**
18486  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18487  * @vport: The vport that the received sequences were sent to.
18488  *
18489  * This function cleans up all outstanding received sequences. This is called
18490  * by the driver when a link event or user action invalidates all the received
18491  * sequences.
18492  **/
18493 void
18494 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18495 {
18496         struct lpfc_dmabuf *h_buf, *hnext;
18497         struct lpfc_dmabuf *d_buf, *dnext;
18498         struct hbq_dmabuf *dmabuf = NULL;
18499
18500         /* start with the oldest sequence on the rcv list */
18501         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18502                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18503                 list_del_init(&dmabuf->hbuf.list);
18504                 list_for_each_entry_safe(d_buf, dnext,
18505                                          &dmabuf->dbuf.list, list) {
18506                         list_del_init(&d_buf->list);
18507                         lpfc_in_buf_free(vport->phba, d_buf);
18508                 }
18509                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18510         }
18511 }
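
/*
 * Both loops above rely on the list_for_each_entry_safe() idiom: because
 * each entry is unlinked (and its buffer freed) while the list is being
 * walked, the iterator must cache the next entry up front. A generic
 * sketch of the pattern, with hypothetical names:
 *
 *	struct item *pos, *tmp;
 *
 *	list_for_each_entry_safe(pos, tmp, &head, list) {
 *		list_del_init(&pos->list);	// safe: tmp already saved
 *		kfree(pos);
 *	}
 */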
18512
18513 /**
18514  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18515  * @vport: The vport that the received sequences were sent to.
18516  *
18517  * This function determines whether any received sequences have timed out by
18518  * first checking the vport's rcv_buffer_time_stamp. If this time stamp
18519  * indicates that there is at least one timed-out sequence, this routine will
18520  * go through the received sequences one at a time, from most inactive to most
18521  * active, to determine which ones need to be cleaned up. Once it has
18522  * determined that a sequence needs cleaning up, it simply frees the resources
18523  * without sending an abort.
18524  **/
18525 void
18526 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18527 {
18528         struct lpfc_dmabuf *h_buf, *hnext;
18529         struct lpfc_dmabuf *d_buf, *dnext;
18530         struct hbq_dmabuf *dmabuf = NULL;
18531         unsigned long timeout;
18532         int abort_count = 0;
18533
18534         timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18535                    vport->rcv_buffer_time_stamp);
18536         if (list_empty(&vport->rcv_buffer_list) ||
18537             time_before(jiffies, timeout))
18538                 return;
18539         /* start with the oldest sequence on the rcv list */
18540         list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18541                 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18542                 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18543                            dmabuf->time_stamp);
18544                 if (time_before(jiffies, timeout))
18545                         break;
18546                 abort_count++;
18547                 list_del_init(&dmabuf->hbuf.list);
18548                 list_for_each_entry_safe(d_buf, dnext,
18549                                          &dmabuf->dbuf.list, list) {
18550                         list_del_init(&d_buf->list);
18551                         lpfc_in_buf_free(vport->phba, d_buf);
18552                 }
18553                 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18554         }
18555         if (abort_count)
18556                 lpfc_update_rcv_time_stamp(vport);
18557 }
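
/*
 * The timeout test above uses the wrap-safe jiffies helpers: a sequence
 * is considered expired once E_D_TOV milliseconds have elapsed since its
 * last frame arrived. A hypothetical stand-alone predicate equivalent to
 * the checks above:
 *
 *	static bool seq_expired(unsigned long stamp_jiffies, uint32_t edtov_ms)
 *	{
 *		return time_after_eq(jiffies,
 *				     stamp_jiffies + msecs_to_jiffies(edtov_ms));
 *	}
 */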
18558
18559 /**
18560  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18561  * @vport: pointer to a virtual port
18562  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18563  *
18564  * This function searches through the existing incomplete sequences that have
18565  * been sent to this @vport. If the frame matches one of the incomplete
18566  * sequences then the dbuf in the @dmabuf is added to the list of frames that
18567  * make up that sequence. If no sequence is found that matches this frame then
18568  * the function will add the hbuf in the @dmabuf to the @vport's
18569  * rcv_buffer_list. This function returns a pointer to the first dmabuf in
18570  * the sequence list that the frame was linked to.
18571  **/
18572 static struct hbq_dmabuf *
18573 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18574 {
18575         struct fc_frame_header *new_hdr;
18576         struct fc_frame_header *temp_hdr;
18577         struct lpfc_dmabuf *d_buf;
18578         struct lpfc_dmabuf *h_buf;
18579         struct hbq_dmabuf *seq_dmabuf = NULL;
18580         struct hbq_dmabuf *temp_dmabuf = NULL;
18581         uint8_t found = 0;
18582
18583         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18584         dmabuf->time_stamp = jiffies;
18585         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18586
18587         /* Use the hdr_buf to find the sequence that this frame belongs to */
18588         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18589                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18590                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18591                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18592                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18593                         continue;
18594                 /* found a pending sequence that matches this frame */
18595                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18596                 break;
18597         }
18598         if (!seq_dmabuf) {
18599                 /*
18600                  * This indicates first frame received for this sequence.
18601                  * Queue the buffer on the vport's rcv_buffer_list.
18602                  */
18603                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18604                 lpfc_update_rcv_time_stamp(vport);
18605                 return dmabuf;
18606         }
18607         temp_hdr = seq_dmabuf->hbuf.virt;
18608         if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18609                 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18610                 list_del_init(&seq_dmabuf->hbuf.list);
18611                 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18612                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18613                 lpfc_update_rcv_time_stamp(vport);
18614                 return dmabuf;
18615         }
18616         /* move this sequence to the tail to indicate a young sequence */
18617         list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18618         seq_dmabuf->time_stamp = jiffies;
18619         lpfc_update_rcv_time_stamp(vport);
18620         if (list_empty(&seq_dmabuf->dbuf.list)) {
18621                 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18622                 return seq_dmabuf;
18623         }
18624         /* find the correct place in the sequence to insert this frame */
18625         d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18626         while (!found) {
18627                 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18628                 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18629                 /*
18630                  * If the frame's sequence count is greater than the frame on
18631                  * the list then insert the frame right after this frame
18632                  */
18633                 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18634                         be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18635                         list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18636                         found = 1;
18637                         break;
18638                 }
18639
18640                 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18641                         break;
18642                 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18643         }
18644
18645         if (found)
18646                 return seq_dmabuf;
18647         return NULL;
18648 }
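
/*
 * Sketch (hypothetical helper, not part of the driver) of the identity
 * test applied above: two frames belong to the same sequence only when
 * SEQ_ID, OX_ID and the 3-byte S_ID all match; within a sequence, frames
 * are kept sorted by ascending SEQ_CNT:
 *
 *	static bool same_sequence(const struct fc_frame_header *a,
 *				  const struct fc_frame_header *b)
 *	{
 *		return a->fh_seq_id == b->fh_seq_id &&
 *		       a->fh_ox_id == b->fh_ox_id &&
 *		       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *	}
 */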
18649
18650 /**
18651  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18652  * @vport: pointer to a virtual port
18653  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18654  *
18655  * This function tries to abort the partially assembled sequence described by
18656  * the information from the basic abort @dmabuf. It checks whether such a
18657  * partially assembled sequence is held by the driver. If so, it shall free up
18658  * all the frames from the partially assembled sequence.
18659  *
18660  * Return
18661  * true  -- if there is a matching partially assembled sequence present and
18662  *          all the frames of the sequence were freed;
18663  * false -- if there is no matching partially assembled sequence present, so
18664  *          nothing was aborted in the lower layer driver
18665  **/
18666 static bool
18667 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18668                             struct hbq_dmabuf *dmabuf)
18669 {
18670         struct fc_frame_header *new_hdr;
18671         struct fc_frame_header *temp_hdr;
18672         struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18673         struct hbq_dmabuf *seq_dmabuf = NULL;
18674
18675         /* Use the hdr_buf to find the sequence that matches this frame */
18676         INIT_LIST_HEAD(&dmabuf->dbuf.list);
18677         INIT_LIST_HEAD(&dmabuf->hbuf.list);
18678         new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18679         list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18680                 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18681                 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18682                     (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18683                     (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18684                         continue;
18685                 /* found a pending sequence that matches this frame */
18686                 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18687                 break;
18688         }
18689
18690         /* Free up all the frames from the partially assembled sequence */
18691         if (seq_dmabuf) {
18692                 list_for_each_entry_safe(d_buf, n_buf,
18693                                          &seq_dmabuf->dbuf.list, list) {
18694                         list_del_init(&d_buf->list);
18695                         lpfc_in_buf_free(vport->phba, d_buf);
18696                 }
18697                 return true;
18698         }
18699         return false;
18700 }
18701
18702 /**
18703  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18704  * @vport: pointer to a virtual port
18705  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18706  *
18707  * This function tries to abort the assembled sequence passed to the upper
18708  * level protocol, described by the information from the basic abort @dmabuf.
18709  * It checks whether such a pending context exists at the upper level
18710  * protocol. If so, it shall clean up the pending context.
18711  *
18712  * Return
18713  * true  -- if a matching pending context of the sequence was cleaned up
18714  *          at the ulp;
18715  * false -- if no matching pending context of the sequence is present
18716  *          at the ulp.
18717  **/
18718 static bool
18719 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18720 {
18721         struct lpfc_hba *phba = vport->phba;
18722         int handled;
18723
18724         /* Accepting abort at ulp with SLI4 only */
18725         if (phba->sli_rev < LPFC_SLI_REV4)
18726                 return false;
18727
18728         /* Let any interested upper level protocols handle the abort */
18729         handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18730         if (handled)
18731                 return true;
18732
18733         return false;
18734 }
18735
18736 /**
18737  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18738  * @phba: Pointer to HBA context object.
18739  * @cmd_iocbq: pointer to the command iocbq structure.
18740  * @rsp_iocbq: pointer to the response iocbq structure.
18741  *
18742  * This function handles the sequence abort response iocb command complete
18743  * event. It properly releases the memory allocated to the sequence abort
18744  * accept iocb.
18745  **/
18746 static void
18747 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18748                              struct lpfc_iocbq *cmd_iocbq,
18749                              struct lpfc_iocbq *rsp_iocbq)
18750 {
18751         if (cmd_iocbq) {
18752                 lpfc_nlp_put(cmd_iocbq->ndlp);
18753                 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18754         }
18755
18756         /* Failure means BLS ABORT RSP did not get delivered to remote node */
18757         if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18758                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18759                         "3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18760                         get_job_ulpstatus(phba, rsp_iocbq),
18761                         get_job_word4(phba, rsp_iocbq));
18762 }
18763
18764 /**
18765  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18766  * @phba: Pointer to HBA context object.
18767  * @xri: xri id in transaction.
18768  *
18769  * This function validates that the xri maps to the known range of XRIs
18770  * allocated and used by the driver.
18771  **/
18772 uint16_t
18773 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18774                       uint16_t xri)
18775 {
18776         uint16_t i;
18777
18778         for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18779                 if (xri == phba->sli4_hba.xri_ids[i])
18780                         return i;
18781         }
18782         return NO_XRI;
18783 }
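
/*
 * The lookup above is a linear scan that translates a physical XRI from
 * the wire into the driver's logical index, or NO_XRI when the value is
 * outside the allocated range. Typical use, as in the BLS response path
 * below:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri, ...);
 */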
18784
18785 /**
18786  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18787  * @vport: pointer to a virtual port.
18788  * @fc_hdr: pointer to a FC frame header.
18789  * @aborted: was the partially assembled receive sequence successfully aborted
18790  *
18791  * This function sends a BLS response (BA_ACC or BA_RJT) to a previous
18792  * unsolicited sequence abort event after the sequence handling is aborted.
18793  **/
18794 void
18795 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18796                         struct fc_frame_header *fc_hdr, bool aborted)
18797 {
18798         struct lpfc_hba *phba = vport->phba;
18799         struct lpfc_iocbq *ctiocb = NULL;
18800         struct lpfc_nodelist *ndlp;
18801         uint16_t oxid, rxid, xri, lxri;
18802         uint32_t sid, fctl;
18803         union lpfc_wqe128 *icmd;
18804         int rc;
18805
18806         if (!lpfc_is_link_up(phba))
18807                 return;
18808
18809         sid = sli4_sid_from_fc_hdr(fc_hdr);
18810         oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18811         rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18812
18813         ndlp = lpfc_findnode_did(vport, sid);
18814         if (!ndlp) {
18815                 ndlp = lpfc_nlp_init(vport, sid);
18816                 if (!ndlp) {
18817                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18818                                          "1268 Failed to allocate ndlp for "
18819                                          "oxid:x%x SID:x%x\n", oxid, sid);
18820                         return;
18821                 }
18822                 /* Put ndlp onto pport node list */
18823                 lpfc_enqueue_node(vport, ndlp);
18824         }
18825
18826         /* Allocate buffer for rsp iocb */
18827         ctiocb = lpfc_sli_get_iocbq(phba);
18828         if (!ctiocb)
18829                 return;
18830
18831         icmd = &ctiocb->wqe;
18832
18833         /* Extract the F_CTL field from FC_HDR */
18834         fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18835
18836         ctiocb->ndlp = lpfc_nlp_get(ndlp);
18837         if (!ctiocb->ndlp) {
18838                 lpfc_sli_release_iocbq(phba, ctiocb);
18839                 return;
18840         }
18841
18842         ctiocb->vport = phba->pport;
18843         ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18844         ctiocb->sli4_lxritag = NO_XRI;
18845         ctiocb->sli4_xritag = NO_XRI;
18846         ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18847
18848         if (fctl & FC_FC_EX_CTX)
18849                 /* Exchange responder sent the abort so we
18850                  * own the oxid.
18851                  */
18852                 xri = oxid;
18853         else
18854                 xri = rxid;
18855         lxri = lpfc_sli4_xri_inrange(phba, xri);
18856         if (lxri != NO_XRI)
18857                 lpfc_set_rrq_active(phba, ndlp, lxri,
18858                         (xri == oxid) ? rxid : oxid, 0);
18859         /* For BA_ABTS from exchange responder, if the logical xri with
18860          * the oxid maps to the FCP XRI range, the port no longer has
18861          * that exchange context, send a BLS_RJT. Override the IOCB for
18862          * a BA_RJT.
18863          */
18864         if ((fctl & FC_FC_EX_CTX) &&
18865             (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18866                 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18867                 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18868                 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18869                        FC_BA_RJT_INV_XID);
18870                 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18871                        FC_BA_RJT_UNABLE);
18872         }
18873
18874         /* If BA_ABTS failed to abort a partially assembled receive sequence,
18875          * the driver no longer has that exchange, send a BLS_RJT. Override
18876          * the IOCB for a BA_RJT.
18877          */
18878         if (aborted == false) {
18879                 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18880                 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18881                 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18882                        FC_BA_RJT_INV_XID);
18883                 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18884                        FC_BA_RJT_UNABLE);
18885         }
18886
18887         if (fctl & FC_FC_EX_CTX) {
18888                 /* ABTS sent by responder to CT exchange, construction
18889                  * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18890                  * field and RX_ID from ABTS for RX_ID field.
18891                  */
18892                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18893                 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18894         } else {
18895                 /* ABTS sent by initiator to CT exchange, construction
18896                  * of BA_ACC will need to allocate a new XRI for the
18897                  * XRI_TAG field.
18898                  */
18899                 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18900         }
18901
18902         /* OX_ID is the same regardless of who sent the ABTS to the exchange */
18903         bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
18905
18906         /* Use CT=VPI */
18907         bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
18908                ndlp->nlp_DID);
18909         bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
18910                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
18911         bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
18912
18913         /* Xmit CT abts response on exchange <xid> */
18914         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18915                          "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18916                          ctiocb->abort_rctl, oxid, phba->link_state);
18917
18918         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18919         if (rc == IOCB_ERROR) {
18920                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18921                                  "2925 Failed to issue CT ABTS RSP x%x on "
18922                                  "xri x%x, Data x%x\n",
18923                                  ctiocb->abort_rctl, oxid,
18924                                  phba->link_state);
18925                 lpfc_nlp_put(ndlp);
18926                 ctiocb->ndlp = NULL;
18927                 lpfc_sli_release_iocbq(phba, ctiocb);
18928         }
18929 }
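
/*
 * Condensed view (illustrative only) of how the routine above picks the
 * R_CTL for the BLS response:
 *
 *	rctl = FC_RCTL_BA_ACC;
 *	if (responder_sent_abts && oxid_not_owned_by_port)
 *		rctl = FC_RCTL_BA_RJT;	// exchange context is gone
 *	if (!aborted)
 *		rctl = FC_RCTL_BA_RJT;	// nothing was actually aborted
 */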
18930
18931 /**
18932  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
18933  * @vport: Pointer to the vport on which this sequence was received
18934  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18935  *
18936  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
18937  * receive sequence is only partially assembled by the driver, it shall abort
18938  * the partially assembled frames for the sequence. Otherwise, if the
18939  * unsolicited receive sequence has been completely assembled and passed to
18940  * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
18941  * unsolicited sequence as aborted. After that, it will issue a basic accept
18942  * (BA_ACC) or reject (BA_RJT) in response to the abort.
18943  **/
18944 static void
18945 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18946                              struct hbq_dmabuf *dmabuf)
18947 {
18948         struct lpfc_hba *phba = vport->phba;
18949         struct fc_frame_header fc_hdr;
18950         uint32_t fctl;
18951         bool aborted;
18952
18953         /* Make a copy of fc_hdr before the dmabuf is released */
18954         memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18955         fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18956
18957         if (fctl & FC_FC_EX_CTX) {
18958                 /* ABTS by responder to exchange, no cleanup needed */
18959                 aborted = true;
18960         } else {
18961                 /* ABTS by initiator to exchange, need to do cleanup */
18962                 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18963                 if (aborted == false)
18964                         aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18965         }
18966         lpfc_in_buf_free(phba, &dmabuf->dbuf);
18967
18968         if (phba->nvmet_support) {
18969                 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18970                 return;
18971         }
18972
18973         /* Respond with BA_ACC or BA_RJT accordingly */
18974         lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18975 }
18976
18977 /**
18978  * lpfc_seq_complete - Indicates if a sequence is complete
18979  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18980  *
18981  * This function checks the sequence, starting with the frame described by
18982  * @dmabuf, to see if all the frames associated with this sequence are present.
18983  * The frames associated with this sequence are linked to the @dmabuf using the
18984  * dbuf list. This function looks for three major things. 1) That the first
18985  * frame has a sequence count of zero. 2) That there is a frame with the
18986  * end-of-sequence bit set. 3) That there are no holes in the sequence count.
18987  * The function will return 1 when the sequence is complete, otherwise 0.
18988  **/
18989 static int
18990 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18991 {
18992         struct fc_frame_header *hdr;
18993         struct lpfc_dmabuf *d_buf;
18994         struct hbq_dmabuf *seq_dmabuf;
18995         uint32_t fctl;
18996         int seq_count = 0;
18997
18998         hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18999         /* make sure first frame of the sequence has a sequence count of zero */
19000         if (hdr->fh_seq_cnt != seq_count)
19001                 return 0;
19002         fctl = (hdr->fh_f_ctl[0] << 16 |
19003                 hdr->fh_f_ctl[1] << 8 |
19004                 hdr->fh_f_ctl[2]);
19005         /* If last frame of sequence we can return success. */
19006         if (fctl & FC_FC_END_SEQ)
19007                 return 1;
19008         list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19009                 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19010                 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19011                 /* If there is a hole in the sequence count then fail. */
19012                 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19013                         return 0;
19014                 fctl = (hdr->fh_f_ctl[0] << 16 |
19015                         hdr->fh_f_ctl[1] << 8 |
19016                         hdr->fh_f_ctl[2]);
19017                 /* If last frame of sequence we can return success. */
19018                 if (fctl & FC_FC_END_SEQ)
19019                         return 1;
19020         }
19021         return 0;
19022 }
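
/*
 * The F_CTL word is carried as three big-endian bytes in the frame
 * header; both reconstructions above are equivalent to this hypothetical
 * helper:
 *
 *	static uint32_t fc_hdr_fctl(const struct fc_frame_header *hdr)
 *	{
 *		return hdr->fh_f_ctl[0] << 16 |
 *		       hdr->fh_f_ctl[1] << 8 |
 *		       hdr->fh_f_ctl[2];
 *	}
 *
 * For example, f_ctl bytes 98 00 00 yield 0x980000, which has
 * FC_FC_END_SEQ (1 << 19) set, so the frame ends its sequence.
 */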
19023
19024 /**
19025  * lpfc_prep_seq - Prep sequence for ULP processing
19026  * @vport: Pointer to the vport on which this sequence was received
19027  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19028  *
19029  * This function takes a sequence, described by a list of frames, and creates
19030  * a list of iocbq structures to describe the sequence. This iocbq list will be
19031  * used to issue to the generic unsolicited sequence handler. This routine
19032  * returns a pointer to the first iocbq in the list. If the function is unable
19033  * to allocate an iocbq then it throws out the received frames that could not
19034  * be described and returns a pointer to the first iocbq. If unable to
19035  * allocate any iocbqs (including the first), this function will return NULL.
19036  **/
19037 static struct lpfc_iocbq *
19038 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19039 {
19040         struct hbq_dmabuf *hbq_buf;
19041         struct lpfc_dmabuf *d_buf, *n_buf;
19042         struct lpfc_iocbq *first_iocbq, *iocbq;
19043         struct fc_frame_header *fc_hdr;
19044         uint32_t sid;
19045         uint32_t len, tot_len;
19046
19047         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19048         /* remove from receive buffer list */
19049         list_del_init(&seq_dmabuf->hbuf.list);
19050         lpfc_update_rcv_time_stamp(vport);
19051         /* get the Remote Port's SID */
19052         sid = sli4_sid_from_fc_hdr(fc_hdr);
19053         tot_len = 0;
19054         /* Get an iocbq struct to fill in. */
19055         first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19056         if (first_iocbq) {
19057                 /* Initialize the first IOCB. */
19058                 first_iocbq->wcqe_cmpl.total_data_placed = 0;
19059                 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
19060                        IOSTAT_SUCCESS);
19061                 first_iocbq->vport = vport;
19062
19063                 /* Check FC Header to see what TYPE of frame we are rcv'ing */
19064                 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19065                         bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
19066                                sli4_did_from_fc_hdr(fc_hdr));
19067                 }
19068
19069                 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19070                        NO_XRI);
19071                 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19072                        be16_to_cpu(fc_hdr->fh_ox_id));
19073
19074                 /* put the first buffer into the first iocb */
19075                 tot_len = bf_get(lpfc_rcqe_length,
19076                                  &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19077
19078                 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
19079                 first_iocbq->bpl_dmabuf = NULL;
19080                 /* Keep track of the BDE count */
19081                 first_iocbq->wcqe_cmpl.word3 = 1;
19082
19083                 if (tot_len > LPFC_DATA_BUF_SIZE)
19084                         first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
19085                                 LPFC_DATA_BUF_SIZE;
19086                 else
19087                         first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
19088
19089                 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
19090                 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
19091                        sid);
19092         }
19093         iocbq = first_iocbq;
19094         /*
19095          * Each IOCBq can have two Buffers assigned, so go through the list
19096          * of buffers for this sequence and save two buffers in each IOCBq
19097          */
19098         list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19099                 if (!iocbq) {
19100                         lpfc_in_buf_free(vport->phba, d_buf);
19101                         continue;
19102                 }
19103                 if (!iocbq->bpl_dmabuf) {
19104                         iocbq->bpl_dmabuf = d_buf;
19105                         iocbq->wcqe_cmpl.word3++;
19106                         /* We need to get the size out of the right CQE */
19107                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19108                         len = bf_get(lpfc_rcqe_length,
19109                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
19110                         iocbq->unsol_rcv_len = len;
19111                         iocbq->wcqe_cmpl.total_data_placed += len;
19112                         tot_len += len;
19113                 } else {
19114                         iocbq = lpfc_sli_get_iocbq(vport->phba);
19115                         if (!iocbq) {
19116                                 if (first_iocbq) {
19117                                         bf_set(lpfc_wcqe_c_status,
19118                                                &first_iocbq->wcqe_cmpl,
19119                                                IOSTAT_SUCCESS);
19120                                         first_iocbq->wcqe_cmpl.parameter =
19121                                                 IOERR_NO_RESOURCES;
19122                                 }
19123                                 lpfc_in_buf_free(vport->phba, d_buf);
19124                                 continue;
19125                         }
19126                         /* We need to get the size out of the right CQE */
19127                         hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19128                         len = bf_get(lpfc_rcqe_length,
19129                                        &hbq_buf->cq_event.cqe.rcqe_cmpl);
19130                         iocbq->cmd_dmabuf = d_buf;
19131                         iocbq->bpl_dmabuf = NULL;
19132                         iocbq->wcqe_cmpl.word3 = 1;
19133
19134                         if (len > LPFC_DATA_BUF_SIZE)
19135                                 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19136                                         LPFC_DATA_BUF_SIZE;
19137                         else
19138                                 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19139                                         len;
19140
19141                         tot_len += len;
19142                         iocbq->wcqe_cmpl.total_data_placed = tot_len;
19143                         bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
19144                                sid);
19145                         list_add_tail(&iocbq->list, &first_iocbq->list);
19146                 }
19147         }
19148         /* Free the sequence's header buffer */
19149         if (!first_iocbq)
19150                 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19151
19152         return first_iocbq;
19153 }
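
/*
 * Frame-to-iocbq packing used above, illustratively: each iocbq carries
 * at most two data buffers, the first in cmd_dmabuf and the second in
 * bpl_dmabuf, so a five-frame sequence is described as
 *
 *	iocbq0: cmd_dmabuf = frame0, bpl_dmabuf = frame1
 *	iocbq1: cmd_dmabuf = frame2, bpl_dmabuf = frame3
 *	iocbq2: cmd_dmabuf = frame4, bpl_dmabuf = NULL
 *
 * with wcqe_cmpl.word3 counting the BDEs in each iocbq and
 * total_data_placed accumulating the running byte count.
 */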
19154
19155 static void
19156 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19157                           struct hbq_dmabuf *seq_dmabuf)
19158 {
19159         struct fc_frame_header *fc_hdr;
19160         struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19161         struct lpfc_hba *phba = vport->phba;
19162
19163         fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19164         iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19165         if (!iocbq) {
19166                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19167                                 "2707 Ring %d handler: Failed to allocate "
19168                                 "iocb Rctl x%x Type x%x received\n",
19169                                 LPFC_ELS_RING,
19170                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19171                 return;
19172         }
19173         if (!lpfc_complete_unsol_iocb(phba,
19174                                       phba->sli4_hba.els_wq->pring,
19175                                       iocbq, fc_hdr->fh_r_ctl,
19176                                       fc_hdr->fh_type)) {
19177                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19178                                 "2540 Ring %d handler: unexpected Rctl "
19179                                 "x%x Type x%x received\n",
19180                                 LPFC_ELS_RING,
19181                                 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19182                 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19183         }
19184
19185         /* Free iocb created in lpfc_prep_seq */
19186         list_for_each_entry_safe(curr_iocb, next_iocb,
19187                                  &iocbq->list, list) {
19188                 list_del_init(&curr_iocb->list);
19189                 lpfc_sli_release_iocbq(phba, curr_iocb);
19190         }
19191         lpfc_sli_release_iocbq(phba, iocbq);
19192 }
19193
19194 static void
19195 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19196                             struct lpfc_iocbq *rspiocb)
19197 {
19198         struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19199
19200         if (pcmd && pcmd->virt)
19201                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19202         kfree(pcmd);
19203         lpfc_sli_release_iocbq(phba, cmdiocb);
19204         lpfc_drain_txq(phba);
19205 }
19206
19207 static void
19208 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19209                               struct hbq_dmabuf *dmabuf)
19210 {
19211         struct fc_frame_header *fc_hdr;
19212         struct lpfc_hba *phba = vport->phba;
19213         struct lpfc_iocbq *iocbq = NULL;
19214         union  lpfc_wqe128 *pwqe;
19215         struct lpfc_dmabuf *pcmd = NULL;
19216         uint32_t frame_len;
19217         int rc;
19218         unsigned long iflags;
19219
19220         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19221         frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19222
19223         /* Send the received frame back */
19224         iocbq = lpfc_sli_get_iocbq(phba);
19225         if (!iocbq) {
19226                 /* Queue cq event and wakeup worker thread to process it */
19227                 spin_lock_irqsave(&phba->hbalock, iflags);
19228                 list_add_tail(&dmabuf->cq_event.list,
19229                               &phba->sli4_hba.sp_queue_event);
19230                 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19231                 spin_unlock_irqrestore(&phba->hbalock, iflags);
19232                 lpfc_worker_wake_up(phba);
19233                 return;
19234         }
19235
19236         /* Allocate buffer for command payload */
19237         pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19238         if (pcmd)
19239                 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19240                                             &pcmd->phys);
19241         if (!pcmd || !pcmd->virt)
19242                 goto exit;
19243
19244         INIT_LIST_HEAD(&pcmd->list);
19245
19246         /* copyin the payload */
19247         memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19248
19249         iocbq->cmd_dmabuf = pcmd;
19250         iocbq->vport = vport;
19251         iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19252         iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19253         iocbq->num_bdes = 0;
19254
19255         pwqe = &iocbq->wqe;
19256         /* fill in BDE's for command */
19257         pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19258         pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19259         pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19260         pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19261
19262         pwqe->send_frame.frame_len = frame_len;
19263         pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19264         pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19265         pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19266         pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19267         pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19268         pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19269
19270         pwqe->generic.wqe_com.word7 = 0;
19271         pwqe->generic.wqe_com.word10 = 0;
19272
19273         bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19274         bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19275         bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19276         bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19277         bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19278         bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19279         bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19280         bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19281         bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19282         bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19283         bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19284         bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19285         pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19286
19287         iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19288
19289         rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19290         if (rc == IOCB_ERROR)
19291                 goto exit;
19292
19293         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19294         return;
19295
19296 exit:
19297         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19298                         "2023 Unable to process MDS loopback frame\n");
19299         if (pcmd && pcmd->virt)
19300                 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19301         kfree(pcmd);
19302         if (iocbq)
19303                 lpfc_sli_release_iocbq(phba, iocbq);
19304         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19305 }
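
/*
 * The six fc_hdr_wd* assignments above just repack the 24-byte FC header
 * into the WQE as CPU-endian words; equivalently (hypothetical loop):
 *
 *	__be32 *hdr = (__be32 *)fc_hdr;
 *	uint32_t wd[6];
 *	int i;
 *
 *	for (i = 0; i < 6; i++)
 *		wd[i] = be32_to_cpu(hdr[i]);
 */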
19306
19307 /**
19308  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19309  * @phba: Pointer to HBA context object.
19310  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19311  *
19312  * This function is called with no lock held. This function processes all
19313  * the received buffers and gives them to the upper layers when a received
19314  * buffer indicates that it is the final frame in the sequence. The interrupt
19315  * service routine processes received buffers in interrupt context. The
19316  * worker thread calls lpfc_sli4_handle_received_buffer, which calls the
19317  * appropriate receive function when the final frame in a sequence is received.
19318  **/
19319 void
19320 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19321                                  struct hbq_dmabuf *dmabuf)
19322 {
19323         struct hbq_dmabuf *seq_dmabuf;
19324         struct fc_frame_header *fc_hdr;
19325         struct lpfc_vport *vport;
19326         uint32_t fcfi;
19327         uint32_t did;
19328
19329         /* Process each received buffer */
19330         fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19331
19332         if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19333             fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19334                 vport = phba->pport;
19335                 /* Handle MDS Loopback frames */
19336                 if  (!(phba->pport->load_flag & FC_UNLOADING))
19337                         lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19338                 else
19339                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19340                 return;
19341         }
19342
19343         /* check to see if this is a valid type of frame */
19344         if (lpfc_fc_frame_check(phba, fc_hdr)) {
19345                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19346                 return;
19347         }
19348
19349         if (bf_get(lpfc_cqe_code,
19350                    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)
19351                 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19352                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19353         else
19354                 fcfi = bf_get(lpfc_rcqe_fcf_id,
19355                               &dmabuf->cq_event.cqe.rcqe_cmpl);
19356
19357         if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19358                 vport = phba->pport;
19359                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19360                                 "2023 MDS Loopback %d bytes\n",
19361                                 bf_get(lpfc_rcqe_length,
19362                                        &dmabuf->cq_event.cqe.rcqe_cmpl));
19363                 /* Handle MDS Loopback frames */
19364                 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19365                 return;
19366         }
19367
19368         /* d_id this frame is directed to */
19369         did = sli4_did_from_fc_hdr(fc_hdr);
19370
19371         vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19372         if (!vport) {
19373                 /* throw out the frame */
19374                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19375                 return;
19376         }
19377
19378         /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19379         if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19380                 (did != Fabric_DID)) {
19381                 /*
19382                  * Throw out the frame if we are not pt2pt.
19383                  * The pt2pt protocol allows for discovery frames
19384                  * to be received without a registered VPI.
19385                  */
19386                 if (!(vport->fc_flag & FC_PT2PT) ||
19387                         (phba->link_state == LPFC_HBA_READY)) {
19388                         lpfc_in_buf_free(phba, &dmabuf->dbuf);
19389                         return;
19390                 }
19391         }
19392
19393         /* Handle the basic abort sequence (BA_ABTS) event */
19394         if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19395                 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19396                 return;
19397         }
19398
19399         /* Link this frame */
19400         seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19401         if (!seq_dmabuf) {
19402                 /* unable to add frame to vport - throw it out */
19403                 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19404                 return;
19405         }
19406         /* If not the last frame in the sequence, continue processing frames. */
19407         if (!lpfc_seq_complete(seq_dmabuf))
19408                 return;
19409
19410         /* Send the complete sequence to the upper layer protocol */
19411         lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19412 }
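
/*
 * Receive dispatch above, condensed (illustrative):
 *
 *	MDS diagnostic frame          -> lpfc_sli4_handle_mds_loopback()
 *	malformed frame               -> drop (lpfc_in_buf_free())
 *	no vport match for FCFI/DID   -> drop
 *	BA_ABTS                       -> lpfc_sli4_handle_unsol_abort()
 *	otherwise                     -> lpfc_fc_frame_add(); once
 *	                                 lpfc_seq_complete(), hand the
 *	                                 sequence to the ULP via
 *	                                 lpfc_sli4_send_seq_to_ulp()
 */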
19413
19414 /**
19415  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19416  * @phba: pointer to lpfc hba data structure.
19417  *
19418  * This routine is invoked to post rpi header templates to the
19419  * HBA consistent with the SLI-4 interface spec.  This routine
19420  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19421  * SLI4_PAGE_SIZE / 64 rpi context headers (each header is 64 bytes).
19422  *
19423  * This routine does not require any locks.  Its usage is expected
19424  * to be driver load or reset recovery, when driver execution is
19425  * sequential.
19426  *
19427  * Return codes
19428  *      0 - successful
19429  *      -EIO - The mailbox failed to complete successfully.
19430  *      When this error occurs, the driver is not guaranteed
19431  *      to have any rpi regions posted to the device and
19432  *      must either attempt to repost the regions or take a
19433  *      fatal error.
19434  **/
19435 int
19436 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19437 {
19438         struct lpfc_rpi_hdr *rpi_page;
19439         uint32_t rc = 0;
19440         uint16_t lrpi = 0;
19441
19442         /* SLI4 ports that support extents do not require RPI headers. */
19443         if (!phba->sli4_hba.rpi_hdrs_in_use)
19444                 goto exit;
19445         if (phba->sli4_hba.extents_in_use)
19446                 return -EIO;
19447
19448         list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19449                 /*
19450                  * Assign the rpi headers a physical rpi only if the driver
19451                  * has not initialized those resources.  A port reset only
19452                  * needs the headers posted.
19453                  */
19454                 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19455                     LPFC_RPI_RSRC_RDY)
19456                         rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19457
19458                 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19459                 if (rc != MBX_SUCCESS) {
19460                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19461                                         "2008 Error %d posting all rpi "
19462                                         "headers\n", rc);
19463                         rc = -EIO;
19464                         break;
19465                 }
19466         }
19467
19468  exit:
19469         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19470                LPFC_RPI_RSRC_RDY);
19471         return rc;
19472 }
19473
19474 /**
19475  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19476  * @phba: pointer to lpfc hba data structure.
19477  * @rpi_page:  pointer to the rpi memory region.
19478  *
19479  * This routine is invoked to post a single rpi header to the
19480  * HBA consistent with the SLI-4 interface spec.  This memory region
19481  * maps up to 64 rpi context regions.
19482  *
19483  * Return codes
19484  *      0 - successful
19485  *      -ENOMEM - No available memory
19486  *      -EIO - The mailbox failed to complete successfully.
19487  **/
19488 int
19489 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19490 {
19491         LPFC_MBOXQ_t *mboxq;
19492         struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19493         uint32_t rc = 0;
19494         uint32_t shdr_status, shdr_add_status;
19495         union lpfc_sli4_cfg_shdr *shdr;
19496
19497         /* SLI4 ports that support extents do not require RPI headers. */
19498         if (!phba->sli4_hba.rpi_hdrs_in_use)
19499                 return rc;
19500         if (phba->sli4_hba.extents_in_use)
19501                 return -EIO;
19502
19503         /* The port is notified of the header region via a mailbox command. */
19504         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19505         if (!mboxq) {
19506                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19507                                 "2001 Unable to allocate memory for issuing "
19508                                 "SLI_CONFIG_SPECIAL mailbox command\n");
19509                 return -ENOMEM;
19510         }
19511
19512         /* Post all rpi memory regions to the port. */
19513         hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19514         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19515                          LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19516                          sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19517                          sizeof(struct lpfc_sli4_cfg_mhdr),
19518                          LPFC_SLI4_MBX_EMBED);
19519
19520
19521         /* Post the physical rpi to the port for this rpi header. */
19522         bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19523                rpi_page->start_rpi);
19524         bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19525                hdr_tmpl, rpi_page->page_count);
19526
19527         hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19528         hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19529         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19530         shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19531         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19532         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19533         mempool_free(mboxq, phba->mbox_mem_pool);
19534         if (shdr_status || shdr_add_status || rc) {
19535                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19536                                 "2514 POST_RPI_HDR mailbox failed with "
19537                                 "status x%x add_status x%x, mbx status x%x\n",
19538                                 shdr_status, shdr_add_status, rc);
19539                 rc = -ENXIO;
19540         } else {
19541                 /*
19542                  * The next_rpi stores the next logical modulo-64 rpi value used
19543                  * to post physical rpis in subsequent rpi postings.
19544                  */
19545                 spin_lock_irq(&phba->hbalock);
19546                 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19547                 spin_unlock_irq(&phba->hbalock);
19548         }
19549         return rc;
19550 }
19551
19552 /**
19553  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19554  * @phba: pointer to lpfc hba data structure.
19555  *
19556  * This routine is invoked to allocate an available rpi from the
19557  * driver's rpi bitmask.  If the driver is running low on rpi
19558  * resources, it also attempts to post another rpi header page to
19559  * the port.
19560  *
19561  * Returns
19562  *      A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19563  *      LPFC_RPI_ALLOC_ERROR if no rpis are available.
19564  **/
19565 int
19566 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19567 {
19568         unsigned long rpi;
19569         uint16_t max_rpi, rpi_limit;
19570         uint16_t rpi_remaining, lrpi = 0;
19571         struct lpfc_rpi_hdr *rpi_hdr;
19572         unsigned long iflag;
19573
19574         /*
19575          * Fetch the next logical rpi.  Because this index is logical,
19576          * the  driver starts at 0 each time.
19577          */
19578         spin_lock_irqsave(&phba->hbalock, iflag);
19579         max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19580         rpi_limit = phba->sli4_hba.next_rpi;
19581
19582         rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19583         if (rpi >= rpi_limit)
19584                 rpi = LPFC_RPI_ALLOC_ERROR;
19585         else {
19586                 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19587                 phba->sli4_hba.max_cfg_param.rpi_used++;
19588                 phba->sli4_hba.rpi_count++;
19589         }
19590         lpfc_printf_log(phba, KERN_INFO,
19591                         LOG_NODE | LOG_DISCOVERY,
19592                         "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19593                         (int) rpi, max_rpi, rpi_limit);
19594
19595         /*
19596          * Don't try to allocate more rpi header regions if the device limit
19597          * has been exhausted.
19598          */
19599         if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19600             (phba->sli4_hba.rpi_count >= max_rpi)) {
19601                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19602                 return rpi;
19603         }
19604
19605         /*
19606          * RPI header postings are not required for SLI4 ports capable of
19607          * extents.
19608          */
19609         if (!phba->sli4_hba.rpi_hdrs_in_use) {
19610                 spin_unlock_irqrestore(&phba->hbalock, iflag);
19611                 return rpi;
19612         }
19613
19614         /*
19615          * If the driver is running low on rpi resources, allocate another
19616          * page now.  Note that the next_rpi value is used because
19617          * it represents how many are actually in use, whereas max_rpi notes
19618          * the maximum number supported by the device.
19619          */
19620         rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19621         spin_unlock_irqrestore(&phba->hbalock, iflag);
19622         if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19623                 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19624                 if (!rpi_hdr) {
19625                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19626                                         "2002 Error Could not grow rpi "
19627                                         "count\n");
19628                 } else {
19629                         lrpi = rpi_hdr->start_rpi;
19630                         rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19631                         lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19632                 }
19633         }
19634
19635         return rpi;
19636 }
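
/*
 * The core of the allocation above is the standard bitmap-allocator
 * idiom, shown here as a stand-alone (hypothetical) helper; the real
 * routine additionally holds hbalock and replenishes rpi header pages
 * when fewer than LPFC_RPI_LOW_WATER_MARK rpis remain:
 *
 *	static int bitmap_alloc_id(unsigned long *bmask, unsigned int limit)
 *	{
 *		unsigned int id = find_first_zero_bit(bmask, limit);
 *
 *		if (id >= limit)
 *			return -ENOSPC;
 *		set_bit(id, bmask);
 *		return id;
 *	}
 */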
19637
19638 /**
19639  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19640  * @phba: pointer to lpfc hba data structure.
19641  * @rpi: rpi to free
19642  *
19643  * This routine is invoked to release an rpi to the pool of
19644  * available rpis maintained by the driver.
19645  **/
19646 static void
19647 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19648 {
19649         /*
19650          * if the rpi value indicates a prior unreg has already
19651          * been done, skip the unreg.
19652          */
19653         if (rpi == LPFC_RPI_ALLOC_ERROR)
19654                 return;
19655
19656         if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19657                 phba->sli4_hba.rpi_count--;
19658                 phba->sli4_hba.max_cfg_param.rpi_used--;
19659         } else {
19660                 lpfc_printf_log(phba, KERN_INFO,
19661                                 LOG_NODE | LOG_DISCOVERY,
19662                                 "2016 rpi %x not inuse\n",
19663                                 rpi);
19664         }
19665 }
19666
19667 /**
19668  * lpfc_sli4_free_rpi - Release an rpi for reuse.
19669  * @phba: pointer to lpfc hba data structure.
19670  * @rpi: rpi to free
19671  *
19672  * This routine is invoked to release an rpi to the pool of
19673  * available rpis maintained by the driver.
19674  **/
19675 void
19676 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19677 {
19678         spin_lock_irq(&phba->hbalock);
19679         __lpfc_sli4_free_rpi(phba, rpi);
19680         spin_unlock_irq(&phba->hbalock);
19681 }
19682
19683 /**
19684  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19685  * @phba: pointer to lpfc hba data structure.
19686  *
19687  * This routine is invoked to remove the memory regions that
19688  * provide rpis via a bitmask.
19689  **/
19690 void
19691 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19692 {
19693         kfree(phba->sli4_hba.rpi_bmask);
19694         kfree(phba->sli4_hba.rpi_ids);
19695         bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19696 }
19697
19698 /**
19699  * lpfc_sli4_resume_rpi - Resume an rpi on the port
19700  * @ndlp: pointer to lpfc nodelist data structure.
19701  * @cmpl: completion call-back.
19702  * @arg: data to load as MBox 'caller buffer information'
19703  *
19704  * This routine is invoked to issue a RESUME_RPI mailbox command to
19705  * resume traffic on the rpi associated with @ndlp.
19706  **/
19707 int
19708 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19709         void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19710 {
19711         LPFC_MBOXQ_t *mboxq;
19712         struct lpfc_hba *phba = ndlp->phba;
19713         int rc;
19714
19715         /* The port is notified of the header region via a mailbox command. */
19716         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19717         if (!mboxq)
19718                 return -ENOMEM;
19719
19720         /* If cmpl assigned, then this nlp_get pairs with
19721          * lpfc_mbx_cmpl_resume_rpi.
19722          *
19723          * Else cmpl is NULL, then this nlp_get pairs with
19724          * lpfc_sli_def_mbox_cmpl.
19725          */
19726         if (!lpfc_nlp_get(ndlp)) {
19727                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19728                                 "2122 %s: Failed to get nlp ref\n",
19729                                 __func__);
19730                 mempool_free(mboxq, phba->mbox_mem_pool);
19731                 return -EIO;
19732         }
19733
19734         /* Post all rpi memory regions to the port. */
19735         lpfc_resume_rpi(mboxq, ndlp);
19736         if (cmpl) {
19737                 mboxq->mbox_cmpl = cmpl;
19738                 mboxq->ctx_buf = arg;
19739         } else
19740                 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19741         mboxq->ctx_ndlp = ndlp;
19742         mboxq->vport = ndlp->vport;
19743         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19744         if (rc == MBX_NOT_FINISHED) {
19745                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19746                                 "2010 Resume RPI Mailbox failed "
19747                                 "status %d, mbxStatus x%x\n", rc,
19748                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19749                 lpfc_nlp_put(ndlp);
19750                 mempool_free(mboxq, phba->mbox_mem_pool);
19751                 return -EIO;
19752         }
19753         return 0;
19754 }
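
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller
 * resuming an rpi with a private completion handler. As the comments
 * above note, the handler owns the node reference taken by
 * lpfc_sli4_resume_rpi() and must also free the mailbox.
 */
static void example_resume_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        /* mboxq->ctx_buf carries the caller buffer passed as @arg */
        lpfc_nlp_put((struct lpfc_nodelist *)mboxq->ctx_ndlp);
        mempool_free(mboxq, phba->mbox_mem_pool);
}

static int example_resume(struct lpfc_nodelist *ndlp)
{
        /* Returns 0 on successful submit, -ENOMEM or -EIO on failure */
        return lpfc_sli4_resume_rpi(ndlp, example_resume_cmpl, NULL);
}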
19755
19756 /**
19757  * lpfc_sli4_init_vpi - Initialize a vpi with the port
19758  * @vport: Pointer to the vport for which the vpi is being initialized
19759  *
19760  * This routine is invoked to activate a vpi with the port.
19761  *
19762  * Returns:
19763  *    0 success
19764  *    -Evalue otherwise
19765  **/
19766 int
19767 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19768 {
19769         LPFC_MBOXQ_t *mboxq;
19770         int rc = 0;
19771         int retval = MBX_SUCCESS;
19772         uint32_t mbox_tmo;
19773         struct lpfc_hba *phba = vport->phba;
19774         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19775         if (!mboxq)
19776                 return -ENOMEM;
19777         lpfc_init_vpi(phba, mboxq, vport->vpi);
19778         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19779         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19780         if (rc != MBX_SUCCESS) {
19781                 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19782                                 "2022 INIT VPI Mailbox failed "
19783                                 "status %d, mbxStatus x%x\n", rc,
19784                                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19785                 retval = -EIO;
19786         }
19787         if (rc != MBX_TIMEOUT)
19788                 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19789
19790         return retval;
19791 }
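
/*
 * Illustrative sketch, not part of the driver: a hypothetical vport
 * bring-up step. The vpi must be activated with the port before
 * discovery proceeds; the call blocks on the mailbox.
 */
static int example_bring_up_vpi(struct lpfc_vport *vport)
{
        int rc = lpfc_sli4_init_vpi(vport);

        if (rc)
                return rc; /* -ENOMEM or -EIO */
        /* The vpi is now active with the port */
        return 0;
}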
19792
19793 /**
19794  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19795  * @phba: pointer to lpfc hba data structure.
19796  * @mboxq: Pointer to mailbox object.
19797  *
19798  * This routine is the completion handler for the ADD_FCF_RECORD mailbox
19799  * command used to manually add a single FCF record. It checks the
19800  * returned status and frees the nonembedded mailbox resources.
19801  **/
19802 static void
19803 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19804 {
19805         void *virt_addr;
19806         union lpfc_sli4_cfg_shdr *shdr;
19807         uint32_t shdr_status, shdr_add_status;
19808
19809         virt_addr = mboxq->sge_array->addr[0];
19810         /* The IOCTL status is embedded in the mailbox subheader. */
19811         shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19812         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19813         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19814
19815         if ((shdr_status || shdr_add_status) &&
19816                 (shdr_status != STATUS_FCF_IN_USE))
19817                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19818                         "2558 ADD_FCF_RECORD mailbox failed with "
19819                         "status x%x add_status x%x\n",
19820                         shdr_status, shdr_add_status);
19821
19822         lpfc_sli4_mbox_cmd_free(phba, mboxq);
19823 }
19824
19825 /**
19826  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19827  * @phba: pointer to lpfc hba data structure.
19828  * @fcf_record:  pointer to the initialized fcf record to add.
19829  *
19830  * This routine is invoked to manually add a single FCF record. The caller
19831  * must pass a completely initialized FCF_Record.  This routine takes
19832  * care of the nonembedded mailbox operations.
19833  **/
19834 int
19835 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19836 {
19837         int rc = 0;
19838         LPFC_MBOXQ_t *mboxq;
19839         uint8_t *bytep;
19840         void *virt_addr;
19841         struct lpfc_mbx_sge sge;
19842         uint32_t alloc_len, req_len;
19843         uint32_t fcfindex;
19844
19845         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19846         if (!mboxq) {
19847                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19848                         "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19849                 return -ENOMEM;
19850         }
19851
19852         req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19853                   sizeof(uint32_t);
19854
19855         /* Allocate DMA memory and set up the non-embedded mailbox command */
19856         alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19857                                      LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19858                                      req_len, LPFC_SLI4_MBX_NEMBED);
19859         if (alloc_len < req_len) {
19860                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19861                         "2523 Allocated DMA memory size (x%x) is "
19862                         "less than the requested DMA memory "
19863                         "size (x%x)\n", alloc_len, req_len);
19864                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19865                 return -ENOMEM;
19866         }
19867
19868         /*
19869          * Get the first SGE entry from the non-embedded DMA memory.  This
19870          * routine only uses a single SGE.
19871          */
19872         lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19873         virt_addr = mboxq->sge_array->addr[0];
19874         /*
19875          * Configure the FCF record for FCFI 0.  This is the driver's
19876          * hardcoded default and gets used in nonFIP mode.
19877          */
19878         fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19879         bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19880         lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19881
19882         /*
19883          * Copy the fcf_index and the FCF Record Data. The data starts after
19884          * the FCoE header plus word10. The data copy needs to be endian
19885          * correct.
19886          */
19887         bytep += sizeof(uint32_t);
19888         lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19889         mboxq->vport = phba->pport;
19890         mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19891         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19892         if (rc == MBX_NOT_FINISHED) {
19893                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19894                         "2515 ADD_FCF_RECORD mailbox failed with "
19895                         "status 0x%x\n", rc);
19896                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19897                 rc = -EIO;
19898         } else
19899                 rc = 0;
19900
19901         return rc;
19902 }
19903
19904 /**
19905  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19906  * @phba: pointer to lpfc hba data structure.
19907  * @fcf_record:  pointer to the fcf record to write the default data.
19908  * @fcf_index: FCF table entry index.
19909  *
19910  * This routine is invoked to build the driver's default FCF record.  The
19911  * values used are hardcoded.  This routine handles memory initialization.
19912  *
19913  **/
19914 void
19915 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19916                                 struct fcf_record *fcf_record,
19917                                 uint16_t fcf_index)
19918 {
19919         memset(fcf_record, 0, sizeof(struct fcf_record));
19920         fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19921         fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19922         fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19923         bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19924         bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19925         bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19926         bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19927         bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19928         bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19929         bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19930         bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19931         bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19932         bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19933         bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19934         bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19935         bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19936                 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19937         /* Set the VLAN bit map */
19938         if (phba->valid_vlan) {
19939                 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19940                         = 1 << (phba->vlan_id % 8);
19941         }
19942 }
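
/*
 * Illustrative sketch, not part of the driver: pairing the two routines
 * above the way a nonFIP-mode setup would. The stack-allocated record
 * and index 0 are for illustration only.
 */
static int example_add_default_fcf(struct lpfc_hba *phba)
{
        struct fcf_record fcf_record;

        lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
        /* Issues a nonembedded mailbox; its completion frees resources */
        return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}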
19943
19944 /**
19945  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
19946  * @phba: pointer to lpfc hba data structure.
19947  * @fcf_index: FCF table entry offset.
19948  *
19949  * This routine is invoked to scan the entire FCF table by reading FCF
19950  * records and processing them one at a time starting from the @fcf_index
19951  * for initial FCF discovery or fast FCF failover rediscovery.
19952  *
19953  * Return 0 if the mailbox command is submitted successfully, non-zero
19954  * otherwise.
19955  **/
19956 int
19957 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19958 {
19959         int rc = 0, error;
19960         LPFC_MBOXQ_t *mboxq;
19961
19962         phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19963         phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19964         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19965         if (!mboxq) {
19966                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19967                                 "2000 Failed to allocate mbox for "
19968                                 "READ_FCF cmd\n");
19969                 error = -ENOMEM;
19970                 goto fail_fcf_scan;
19971         }
19972         /* Construct the read FCF record mailbox command */
19973         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19974         if (rc) {
19975                 error = -EINVAL;
19976                 goto fail_fcf_scan;
19977         }
19978         /* Issue the mailbox command asynchronously */
19979         mboxq->vport = phba->pport;
19980         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19981
19982         spin_lock_irq(&phba->hbalock);
19983         phba->hba_flag |= FCF_TS_INPROG;
19984         spin_unlock_irq(&phba->hbalock);
19985
19986         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19987         if (rc == MBX_NOT_FINISHED)
19988                 error = -EIO;
19989         else {
19990                 /* Reset eligible FCF count for new scan */
19991                 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19992                         phba->fcf.eligible_fcf_cnt = 0;
19993                 error = 0;
19994         }
19995 fail_fcf_scan:
19996         if (error) {
19997                 if (mboxq)
19998                         lpfc_sli4_mbox_cmd_free(phba, mboxq);
19999                 /* FCF scan failed, clear FCF_TS_INPROG flag */
20000                 spin_lock_irq(&phba->hbalock);
20001                 phba->hba_flag &= ~FCF_TS_INPROG;
20002                 spin_unlock_irq(&phba->hbalock);
20003         }
20004         return error;
20005 }
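
/*
 * Illustrative sketch, not part of the driver: starting a fresh scan
 * from the beginning of the FCF table with LPFC_FCOE_FCF_GET_FIRST,
 * the same value the routine above checks when it resets the eligible
 * FCF count.
 */
static int example_start_fcf_scan(struct lpfc_hba *phba)
{
        /* Async submit; lpfc_mbx_cmpl_fcf_scan_read_fcf_rec completes it */
        return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}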
20006
20007 /**
20008  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20009  * @phba: pointer to lpfc hba data structure.
20010  * @fcf_index: FCF table entry offset.
20011  *
20012  * This routine is invoked to read an FCF record indicated by @fcf_index
20013  * and to use it for FLOGI roundrobin FCF failover.
20014  *
20015  * Return 0 if the mailbox command is submitted successfully, non-zero
20016  * otherwise.
20017  **/
20018 int
20019 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20020 {
20021         int rc = 0, error;
20022         LPFC_MBOXQ_t *mboxq;
20023
20024         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20025         if (!mboxq) {
20026                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20027                                 "2763 Failed to allocate mbox for "
20028                                 "READ_FCF cmd\n");
20029                 error = -ENOMEM;
20030                 goto fail_fcf_read;
20031         }
20032         /* Construct the read FCF record mailbox command */
20033         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20034         if (rc) {
20035                 error = -EINVAL;
20036                 goto fail_fcf_read;
20037         }
20038         /* Issue the mailbox command asynchronously */
20039         mboxq->vport = phba->pport;
20040         mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20041         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20042         if (rc == MBX_NOT_FINISHED)
20043                 error = -EIO;
20044         else
20045                 error = 0;
20046
20047 fail_fcf_read:
20048         if (error && mboxq)
20049                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20050         return error;
20051 }
20052
20053 /**
20054  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20055  * @phba: pointer to lpfc hba data structure.
20056  * @fcf_index: FCF table entry offset.
20057  *
20058  * This routine is invoked to read an FCF record indicated by @fcf_index to
20059  * determine whether it's eligible for the FLOGI roundrobin failover list.
20060  *
20061  * Return 0 if the mailbox command is submitted successfully, non-zero
20062  * otherwise.
20063  **/
20064 int
20065 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20066 {
20067         int rc = 0, error;
20068         LPFC_MBOXQ_t *mboxq;
20069
20070         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20071         if (!mboxq) {
20072                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20073                                 "2758 Failed to allocate mbox for "
20074                                 "READ_FCF cmd\n");
20075                 error = -ENOMEM;
20076                 goto fail_fcf_read;
20077         }
20078         /* Construct the read FCF record mailbox command */
20079         rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20080         if (rc) {
20081                 error = -EINVAL;
20082                 goto fail_fcf_read;
20083         }
20084         /* Issue the mailbox command asynchronously */
20085         mboxq->vport = phba->pport;
20086         mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20087         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20088         if (rc == MBX_NOT_FINISHED)
20089                 error = -EIO;
20090         else
20091                 error = 0;
20092
20093 fail_fcf_read:
20094         if (error && mboxq)
20095                 lpfc_sli4_mbox_cmd_free(phba, mboxq);
20096         return error;
20097 }
20098
20099 /**
20100  * lpfc_check_next_fcf_pri_level - Set rr_bmask to the next priority group
20101  * @phba: pointer to the lpfc_hba struct for this port.
20102  *
20103  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get routine
20104  * when the rr_bmask is empty. The FCF indices are put into the rr_bmask
20105  * based on their priority level, starting from the highest priority to
20106  * the lowest. The most likely FCF candidate will be in the highest
20107  * priority group. When this routine is called it searches the fcf_pri
20108  * list for the next lowest priority group and repopulates the rr_bmask
20109  * with only those fcf_indexes.
20110  * Returns: 1 = success, 0 = failure.
20111  **/
20112 static int
20113 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20114 {
20115         uint16_t next_fcf_pri;
20116         uint16_t last_index;
20117         struct lpfc_fcf_pri *fcf_pri;
20118         int rc;
20119         int ret = 0;
20120
20121         last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20122                         LPFC_SLI4_FCF_TBL_INDX_MAX);
20123         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20124                         "3060 Last IDX %d\n", last_index);
20125
20126         /* Verify the priority list has 2 or more entries */
20127         spin_lock_irq(&phba->hbalock);
20128         if (list_empty(&phba->fcf.fcf_pri_list) ||
20129             list_is_singular(&phba->fcf.fcf_pri_list)) {
20130                 spin_unlock_irq(&phba->hbalock);
20131                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20132                         "3061 Last IDX %d\n", last_index);
20133                 return 0; /* Empty rr list */
20134         }
20135         spin_unlock_irq(&phba->hbalock);
20136
20137         next_fcf_pri = 0;
20138         /*
20139          * Clear the rr_bmask and set all of the bits that are at this
20140          * priority.
20141          */
20142         memset(phba->fcf.fcf_rr_bmask, 0,
20143                         sizeof(*phba->fcf.fcf_rr_bmask));
20144         spin_lock_irq(&phba->hbalock);
20145         list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20146                 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20147                         continue;
20148                 /*
20149                  * The first priority that has not failed FLOGI
20150                  * will be the highest.
20151                  */
20152                 if (!next_fcf_pri)
20153                         next_fcf_pri = fcf_pri->fcf_rec.priority;
20154                 spin_unlock_irq(&phba->hbalock);
20155                 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20156                         rc = lpfc_sli4_fcf_rr_index_set(phba,
20157                                                 fcf_pri->fcf_rec.fcf_index);
20158                         if (rc)
20159                                 return 0;
20160                 }
20161                 spin_lock_irq(&phba->hbalock);
20162         }
20163         /*
20164          * If next_fcf_pri was not set above and the list is not empty, then
20165          * we have failed FLOGIs on all of them. So reset the FLOGI failed
20166          * flags and start at the beginning.
20167          */
20168         if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20169                 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20170                         fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20171                         /*
20172                          * The first priority that has not failed FLOGI
20173                          * will be the highest.
20174                          */
20175                         if (!next_fcf_pri)
20176                                 next_fcf_pri = fcf_pri->fcf_rec.priority;
20177                         spin_unlock_irq(&phba->hbalock);
20178                         if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20179                                 rc = lpfc_sli4_fcf_rr_index_set(phba,
20180                                                 fcf_pri->fcf_rec.fcf_index);
20181                                 if (rc)
20182                                         return 0;
20183                         }
20184                         spin_lock_irq(&phba->hbalock);
20185                 }
20186         } else
20187                 ret = 1;
20188         spin_unlock_irq(&phba->hbalock);
20189
20190         return ret;
20191 }
20192 /**
20193  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20194  * @phba: pointer to lpfc hba data structure.
20195  *
20196  * This routine is to get the next eligible FCF record index in a round
20197  * robin fashion. If the next eligible FCF record index equals the
20198  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20199  * shall be returned, otherwise, the next eligible FCF record's index
20200  * shall be returned.
20201  **/
20202 uint16_t
20203 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20204 {
20205         uint16_t next_fcf_index;
20206
20207 initial_priority:
20208         /* Search starts from the next bit of the currently registered FCF index */
20209         next_fcf_index = phba->fcf.current_rec.fcf_indx;
20210
20211 next_priority:
20212         /* Determine the next fcf index to check */
20213         next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20214         next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20215                                        LPFC_SLI4_FCF_TBL_INDX_MAX,
20216                                        next_fcf_index);
20217
20218         /* Wrap around condition on phba->fcf.fcf_rr_bmask */
20219         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20220                 /*
20221                  * If we have wrapped then we need to clear the bits that
20222                  * have been tested so that we can detect when we should
20223                  * change the priority level.
20224                  */
20225                 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20226                                                LPFC_SLI4_FCF_TBL_INDX_MAX);
20227         }
20228
20229
20230         /* Check roundrobin failover list empty condition */
20231         if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20232                 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20233                 /*
20234                  * If the next fcf index is not found, check if there are
20235                  * lower priority level fcf's in the fcf_priority list.
20236                  * Set up the rr_bmask with all of the available fcf bits
20237                  * at that level and continue the selection process.
20238                  */
20239                 if (lpfc_check_next_fcf_pri_level(phba))
20240                         goto initial_priority;
20241                 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20242                                 "2844 No roundrobin failover FCF available\n");
20243
20244                 return LPFC_FCOE_FCF_NEXT_NONE;
20245         }
20246
20247         if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20248                 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20249                 LPFC_FCF_FLOGI_FAILED) {
20250                 if (list_is_singular(&phba->fcf.fcf_pri_list))
20251                         return LPFC_FCOE_FCF_NEXT_NONE;
20252
20253                 goto next_priority;
20254         }
20255
20256         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20257                         "2845 Get next roundrobin failover FCF (x%x)\n",
20258                         next_fcf_index);
20259
20260         return next_fcf_index;
20261 }
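
/*
 * Illustrative sketch, not part of the driver: a hypothetical failover
 * step. Clear the index whose FLOGI failed from the roundrobin bmask,
 * then ask for the next eligible index, which may come from a lower
 * priority group via lpfc_check_next_fcf_pri_level().
 */
static void example_fcf_failover_step(struct lpfc_hba *phba,
                                      uint16_t failed_index)
{
        uint16_t next_index;

        lpfc_sli4_fcf_rr_index_clear(phba, failed_index);
        next_index = lpfc_sli4_fcf_rr_next_index_get(phba);
        if (next_index == LPFC_FCOE_FCF_NEXT_NONE)
                return; /* no eligible FCF left at any priority */
        /* ... read and register the FCF record at next_index ... */
}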
20262
20263 /**
20264  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20265  * @phba: pointer to lpfc hba data structure.
20266  * @fcf_index: index into the FCF table to 'set'
20267  *
20268  * This routine sets the FCF record index in to the eligible bmask for
20269  * roundrobin failover search. It checks to make sure that the index
20270  * does not go beyond the range of the driver allocated bmask dimension
20271  * before setting the bit.
20272  *
20273  * Returns 0 if the index bit successfully set, otherwise, it returns
20274  * -EINVAL.
20275  **/
20276 int
20277 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20278 {
20279         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20280                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20281                                 "2610 FCF (x%x) reached driver's book "
20282                                 "keeping dimension:x%x\n",
20283                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20284                 return -EINVAL;
20285         }
20286         /* Set the eligible FCF record index bmask */
20287         set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20288
20289         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20290                         "2790 Set FCF (x%x) to roundrobin FCF failover "
20291                         "bmask\n", fcf_index);
20292
20293         return 0;
20294 }
20295
20296 /**
20297  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20298  * @phba: pointer to lpfc hba data structure.
20299  * @fcf_index: index into the FCF table to 'clear'
20300  *
20301  * This routine clears the FCF record index from the eligible bmask for
20302  * roundrobin failover search. It checks to make sure that the index
20303  * does not go beyond the range of the driver allocated bmask dimension
20304  * before clearing the bit.
20305  **/
20306 void
20307 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20308 {
20309         struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20310         if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20311                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20312                                 "2762 FCF (x%x) reached driver's book "
20313                                 "keeping dimension:x%x\n",
20314                                 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20315                 return;
20316         }
20317         /* Clear the eligible FCF record index bmask */
20318         spin_lock_irq(&phba->hbalock);
20319         list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20320                                  list) {
20321                 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20322                         list_del_init(&fcf_pri->list);
20323                         break;
20324                 }
20325         }
20326         spin_unlock_irq(&phba->hbalock);
20327         clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20328
20329         lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20330                         "2791 Clear FCF (x%x) from roundrobin failover "
20331                         "bmask\n", fcf_index);
20332 }
20333
20334 /**
20335  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20336  * @phba: pointer to lpfc hba data structure.
20337  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20338  *
20339  * This routine is the completion routine for the rediscover FCF table mailbox
20340  * command. If the mailbox command returned failure, it will try to stop the
20341  * FCF rediscover wait timer.
20342  **/
20343 static void
20344 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20345 {
20346         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20347         uint32_t shdr_status, shdr_add_status;
20348
20349         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20350
20351         shdr_status = bf_get(lpfc_mbox_hdr_status,
20352                              &redisc_fcf->header.cfg_shdr.response);
20353         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20354                              &redisc_fcf->header.cfg_shdr.response);
20355         if (shdr_status || shdr_add_status) {
20356                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20357                                 "2746 Requesting for FCF rediscovery failed "
20358                                 "status x%x add_status x%x\n",
20359                                 shdr_status, shdr_add_status);
20360                 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20361                         spin_lock_irq(&phba->hbalock);
20362                         phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20363                         spin_unlock_irq(&phba->hbalock);
20364                         /*
20365                          * CVL event triggered FCF rediscover request failed,
20366                          * last resort to re-try current registered FCF entry.
20367                          */
20368                         lpfc_retry_pport_discovery(phba);
20369                 } else {
20370                         spin_lock_irq(&phba->hbalock);
20371                         phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20372                         spin_unlock_irq(&phba->hbalock);
20373                         /*
20374                          * DEAD FCF event triggered FCF rediscover request
20375                          * failed, last resort to fail over as a link down
20376                          * to FCF registration.
20377                          */
20378                         lpfc_sli4_fcf_dead_failthrough(phba);
20379                 }
20380         } else {
20381                 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20382                                 "2775 Start FCF rediscover quiescent timer\n");
20383                 /*
20384                  * Start the FCF rediscovery wait timer for the pending
20385                  * FCF before rescanning the FCF record table.
20386                  */
20387                 lpfc_fcf_redisc_wait_start_timer(phba);
20388         }
20389
20390         mempool_free(mbox, phba->mbox_mem_pool);
20391 }
20392
20393 /**
20394  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20395  * @phba: pointer to lpfc hba data structure.
20396  *
20397  * This routine is invoked to request rediscovery of the entire FCF table
20398  * by the port.
20399  **/
20400 int
20401 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20402 {
20403         LPFC_MBOXQ_t *mbox;
20404         struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20405         int rc, length;
20406
20407         /* Cancel retry delay timers to all vports before FCF rediscover */
20408         lpfc_cancel_all_vport_retry_delay_timer(phba);
20409
20410         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20411         if (!mbox) {
20412                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20413                                 "2745 Failed to allocate mbox for "
20414                                 "requesting FCF rediscover.\n");
20415                 return -ENOMEM;
20416         }
20417
20418         length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20419                   sizeof(struct lpfc_sli4_cfg_mhdr));
20420         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20421                          LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20422                          length, LPFC_SLI4_MBX_EMBED);
20423
20424         redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20425         /* Set count to 0 for invalidating the entire FCF database */
20426         bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20427
20428         /* Issue the mailbox command asynchronously */
20429         mbox->vport = phba->pport;
20430         mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20431         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20432
20433         if (rc == MBX_NOT_FINISHED) {
20434                 mempool_free(mbox, phba->mbox_mem_pool);
20435                 return -EIO;
20436         }
20437         return 0;
20438 }
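
/*
 * Illustrative sketch, not part of the driver: a hypothetical reaction
 * to a CVL or FCF DEAD event. On a successful request, the completion
 * handler above arms the quiescent wait timer before the table is
 * rescanned.
 */
static int example_request_fcf_redisc(struct lpfc_hba *phba)
{
        int rc = lpfc_sli4_redisc_fcf_table(phba);

        if (rc)
                return rc; /* -ENOMEM or -EIO: mailbox not issued */
        return 0;
}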
20439
20440 /**
20441  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20442  * @phba: pointer to lpfc hba data structure.
20443  *
20444  * This function is the failover routine as a last resort to the FCF DEAD
20445  * event when the driver has failed to perform a fast FCF failover.
20446  **/
20447 void
20448 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20449 {
20450         uint32_t link_state;
20451
20452         /*
20453          * Last resort as FCF DEAD event failover will treat this as
20454          * a link down, but save the link state because we don't want
20455          * it to be changed to Link Down unless it is already down.
20456          */
20457         link_state = phba->link_state;
20458         lpfc_linkdown(phba);
20459         phba->link_state = link_state;
20460
20461         /* Unregister FCF if no devices connected to it */
20462         lpfc_unregister_unused_fcf(phba);
20463 }
20464
20465 /**
20466  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20467  * @phba: pointer to lpfc hba data structure.
20468  * @rgn23_data: pointer to configure region 23 data.
20469  *
20470  * This function gets SLI3 port configure region 23 data through a memory dump
20471  * mailbox command. When it successfully retrieves data, the size of the data
20472  * will be returned, otherwise, 0 will be returned.
20473  **/
20474 static uint32_t
20475 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20476 {
20477         LPFC_MBOXQ_t *pmb = NULL;
20478         MAILBOX_t *mb;
20479         uint32_t offset = 0;
20480         int rc;
20481
20482         if (!rgn23_data)
20483                 return 0;
20484
20485         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20486         if (!pmb) {
20487                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20488                                 "2600 failed to allocate mailbox memory\n");
20489                 return 0;
20490         }
20491         mb = &pmb->u.mb;
20492
20493         do {
20494                 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20495                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20496
20497                 if (rc != MBX_SUCCESS) {
20498                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20499                                         "2601 failed to read config "
20500                                         "region 23, rc 0x%x Status 0x%x\n",
20501                                         rc, mb->mbxStatus);
20502                         mb->un.varDmp.word_cnt = 0;
20503                 }
20504                 /*
20505                  * Dump mem may return a zero word count when finished, or we
20506                  * got a mailbox error; either way we are done.
20507                  */
20508                 if (mb->un.varDmp.word_cnt == 0)
20509                         break;
20510
20511                 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20512                         mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20513
20514                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20515                                        rgn23_data + offset,
20516                                        mb->un.varDmp.word_cnt);
20517                 offset += mb->un.varDmp.word_cnt;
20518         } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20519
20520         mempool_free(pmb, phba->mbox_mem_pool);
20521         return offset;
20522 }
20523
20524 /**
20525  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20526  * @phba: pointer to lpfc hba data structure.
20527  * @rgn23_data: pointer to configure region 23 data.
20528  *
20529  * This function gets SLI4 port configure region 23 data through a memory dump
20530  * mailbox command. When it successfully retrieves data, the size of the data
20531  * will be returned, otherwise, 0 will be returned.
20532  **/
20533 static uint32_t
20534 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20535 {
20536         LPFC_MBOXQ_t *mboxq = NULL;
20537         struct lpfc_dmabuf *mp = NULL;
20538         struct lpfc_mqe *mqe;
20539         uint32_t data_length = 0;
20540         int rc;
20541
20542         if (!rgn23_data)
20543                 return 0;
20544
20545         mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20546         if (!mboxq) {
20547                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20548                                 "3105 failed to allocate mailbox memory\n");
20549                 return 0;
20550         }
20551
20552         if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20553                 goto out;
20554         mqe = &mboxq->u.mqe;
20555         mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20556         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20557         if (rc)
20558                 goto out;
20559         data_length = mqe->un.mb_words[5];
20560         if (data_length == 0)
20561                 goto out;
20562         if (data_length > DMP_RGN23_SIZE) {
20563                 data_length = 0;
20564                 goto out;
20565         }
20566         lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20567 out:
20568         lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20569         return data_length;
20570 }
20571
20572 /**
20573  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20574  * @phba: pointer to lpfc hba data structure.
20575  *
20576  * This function reads region 23 and parses the TLVs for port status to
20577  * decide if the user disabled the port. If the TLV indicates the
20578  * port is disabled, the hba_flag is set accordingly.
20579  **/
20580 void
20581 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20582 {
20583         uint8_t *rgn23_data = NULL;
20584         uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20585         uint32_t offset = 0;
20586
20587         /* Get adapter Region 23 data */
20588         rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20589         if (!rgn23_data)
20590                 goto out;
20591
20592         if (phba->sli_rev < LPFC_SLI_REV4)
20593                 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20594         else {
20595                 if_type = bf_get(lpfc_sli_intf_if_type,
20596                                  &phba->sli4_hba.sli_intf);
20597                 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20598                         goto out;
20599                 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20600         }
20601
20602         if (!data_size)
20603                 goto out;
20604
20605         /* Check the region signature first */
20606         if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20607                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20608                         "2619 Config region 23 has bad signature\n");
20609                 goto out;
20610         }
20611         offset += 4;
20612
20613         /* Check the data structure version */
20614         if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20615                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20616                         "2620 Config region 23 has bad version\n");
20617                 goto out;
20618         }
20619         offset += 4;
20620
20621         /* Parse TLV entries in the region */
20622         while (offset < data_size) {
20623                 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20624                         break;
20625                 /*
20626                  * If the TLV is not a driver specific TLV or the driver
20627                  * id is not the Linux driver id, skip the record.
20628                  */
20629                 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20630                     (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20631                     (rgn23_data[offset + 3] != 0)) {
20632                         offset += rgn23_data[offset + 1] * 4 + 4;
20633                         continue;
20634                 }
20635
20636                 /* Driver found a driver specific TLV in the config region */
20637                 sub_tlv_len = rgn23_data[offset + 1] * 4;
20638                 offset += 4;
20639                 tlv_offset = 0;
20640
20641                 /*
20642                  * Search for configured port state sub-TLV.
20643                  */
20644                 while ((offset < data_size) &&
20645                         (tlv_offset < sub_tlv_len)) {
20646                         if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20647                                 offset += 4;
20648                                 tlv_offset += 4;
20649                                 break;
20650                         }
20651                         if (rgn23_data[offset] != PORT_STE_TYPE) {
20652                                 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20653                                 offset += rgn23_data[offset + 1] * 4 + 4;
20654                                 continue;
20655                         }
20656
20657                         /* This HBA contains PORT_STE configured */
20658                         if (!rgn23_data[offset + 2])
20659                                 phba->hba_flag |= LINK_DISABLED;
20660
20661                         goto out;
20662                 }
20663         }
20664
20665 out:
20666         kfree(rgn23_data);
20667         return;
20668 }
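
/*
 * Illustrative sketch, not part of the driver: the region 23 record
 * walk used above in isolation. Each record carries a type byte and a
 * length in words, so skipping a record advances length * 4 + 4 bytes.
 * The helper and its name are hypothetical.
 */
static const uint8_t *example_find_region23_tlv(const uint8_t *data,
                                                uint32_t size, uint8_t type)
{
        uint32_t offset = 0;

        while (offset + 2 <= size) {
                if (data[offset] == LPFC_REGION23_LAST_REC)
                        break;
                if (data[offset] == type)
                        return &data[offset];
                /* type byte + length byte (in words) + payload */
                offset += data[offset + 1] * 4 + 4;
        }
        return NULL;
}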
20669
20670 /**
20671  * lpfc_log_fw_write_cmpl - logs firmware write completion status
20672  * @phba: pointer to lpfc hba data structure
20673  * @shdr_status: wr_object rsp's status field
20674  * @shdr_add_status: wr_object rsp's add_status field
20675  * @shdr_add_status_2: wr_object rsp's add_status_2 field
20676  * @shdr_change_status: wr_object rsp's change_status field
20677  * @shdr_csf: wr_object rsp's csf bit
20678  *
20679  * This routine is intended to be called after a firmware write completes.
20680  * It will log the next action items to be performed by the user to
20681  * instantiate the newly downloaded firmware, or the reason for incompatibility.
20682  **/
20683 static void
20684 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20685                        u32 shdr_add_status, u32 shdr_add_status_2,
20686                        u32 shdr_change_status, u32 shdr_csf)
20687 {
20688         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20689                         "4198 %s: flash_id x%02x, asic_rev x%02x, "
20690                         "status x%02x, add_status x%02x, add_status_2 x%02x, "
20691                         "change_status x%02x, csf %01x\n", __func__,
20692                         phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20693                         shdr_status, shdr_add_status, shdr_add_status_2,
20694                         shdr_change_status, shdr_csf);
20695
20696         if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20697                 switch (shdr_add_status_2) {
20698                 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20699                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20700                                         "4199 Firmware write failed: "
20701                                         "image incompatible with flash x%02x\n",
20702                                         phba->sli4_hba.flash_id);
20703                         break;
20704                 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20705                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20706                                         "4200 Firmware write failed: "
20707                                         "image incompatible with ASIC "
20708                                         "architecture x%02x\n",
20709                                         phba->sli4_hba.asic_rev);
20710                         break;
20711                 default:
20712                         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20713                                         "4210 Firmware write failed: "
20714                                         "add_status_2 x%02x\n",
20715                                         shdr_add_status_2);
20716                         break;
20717                 }
20718         } else if (!shdr_status && !shdr_add_status) {
20719                 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20720                     shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20721                         if (shdr_csf)
20722                                 shdr_change_status =
20723                                                    LPFC_CHANGE_STATUS_PCI_RESET;
20724                 }
20725
20726                 switch (shdr_change_status) {
20727                 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20728                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20729                                         "3198 Firmware write complete: System "
20730                                         "reboot required to instantiate\n");
20731                         break;
20732                 case (LPFC_CHANGE_STATUS_FW_RESET):
20733                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20734                                         "3199 Firmware write complete: "
20735                                         "Firmware reset required to "
20736                                         "instantiate\n");
20737                         break;
20738                 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20739                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20740                                         "3200 Firmware write complete: Port "
20741                                         "Migration or PCI Reset required to "
20742                                         "instantiate\n");
20743                         break;
20744                 case (LPFC_CHANGE_STATUS_PCI_RESET):
20745                         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20746                                         "3201 Firmware write complete: PCI "
20747                                         "Reset required to instantiate\n");
20748                         break;
20749                 default:
20750                         break;
20751                 }
20752         }
20753 }
20754
20755 /**
20756  * lpfc_wr_object - write an object to the firmware
20757  * @phba: HBA structure that indicates port to create a queue on.
20758  * @dmabuf_list: list of dmabufs to write to the port.
20759  * @size: the total byte value of the objects to write to the port.
20760  * @offset: the current offset to be used to start the transfer.
20761  *
20762  * This routine will create a wr_object mailbox command to send to the port.
20763  * The mailbox command will be constructed using the dma buffers described in
20764  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20765  * BDEs as the embedded mailbox can support. The @offset variable will be
20766  * used to indicate the starting offset of the transfer and will also return
20767  * the offset after the write object mailbox has completed. @size is used to
20768  * determine the end of the object and whether the eof bit should be set.
20769  *
20770  * Returns 0 if successful; @offset will contain the new offset to use
20771  * for the next write.
20772  * Returns a negative value for error cases.
20773  **/
20774 int
20775 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20776                uint32_t size, uint32_t *offset)
20777 {
20778         struct lpfc_mbx_wr_object *wr_object;
20779         LPFC_MBOXQ_t *mbox;
20780         int rc = 0, i = 0;
20781         int mbox_status = 0;
20782         uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20783         uint32_t shdr_change_status = 0, shdr_csf = 0;
20784         uint32_t mbox_tmo;
20785         struct lpfc_dmabuf *dmabuf;
20786         uint32_t written = 0;
20787         bool check_change_status = false;
20788
20789         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20790         if (!mbox)
20791                 return -ENOMEM;
20792
20793         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20794                         LPFC_MBOX_OPCODE_WRITE_OBJECT,
20795                         sizeof(struct lpfc_mbx_wr_object) -
20796                         sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20797
20798         wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20799         wr_object->u.request.write_offset = *offset;
20800         sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20801         wr_object->u.request.object_name[0] =
20802                 cpu_to_le32(wr_object->u.request.object_name[0]);
20803         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20804         list_for_each_entry(dmabuf, dmabuf_list, list) {
20805                 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20806                         break;
20807                 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20808                 wr_object->u.request.bde[i].addrHigh =
20809                         putPaddrHigh(dmabuf->phys);
20810                 if (written + SLI4_PAGE_SIZE >= size) {
20811                         wr_object->u.request.bde[i].tus.f.bdeSize =
20812                                 (size - written);
20813                         written += (size - written);
20814                         bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20815                         bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20816                         check_change_status = true;
20817                 } else {
20818                         wr_object->u.request.bde[i].tus.f.bdeSize =
20819                                 SLI4_PAGE_SIZE;
20820                         written += SLI4_PAGE_SIZE;
20821                 }
20822                 i++;
20823         }
20824         wr_object->u.request.bde_count = i;
20825         bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20826         if (!phba->sli4_hba.intr_enable)
20827                 mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20828         else {
20829                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20830                 mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20831         }
20832
20833         /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
20834         rc = mbox_status;
20835
20836         /* The IOCTL status is embedded in the mailbox subheader. */
20837         shdr_status = bf_get(lpfc_mbox_hdr_status,
20838                              &wr_object->header.cfg_shdr.response);
20839         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20840                                  &wr_object->header.cfg_shdr.response);
20841         shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20842                                    &wr_object->header.cfg_shdr.response);
20843         if (check_change_status) {
20844                 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20845                                             &wr_object->u.response);
20846                 shdr_csf = bf_get(lpfc_wr_object_csf,
20847                                   &wr_object->u.response);
20848         }
20849
20850         if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20851                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20852                                 "3025 Write Object mailbox failed with "
20853                                 "status x%x add_status x%x, add_status_2 x%x, "
20854                                 "mbx status x%x\n",
20855                                 shdr_status, shdr_add_status, shdr_add_status_2,
20856                                 rc);
20857                 rc = -ENXIO;
20858                 *offset = shdr_add_status;
20859         } else {
20860                 *offset += wr_object->u.response.actual_write_length;
20861         }
20862
20863         if (rc || check_change_status)
20864                 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20865                                        shdr_add_status_2, shdr_change_status,
20866                                        shdr_csf);
20867
20868         if (!phba->sli4_hba.intr_enable)
20869                 mempool_free(mbox, phba->mbox_mem_pool);
20870         else if (mbox_status != MBX_TIMEOUT)
20871                 mempool_free(mbox, phba->mbox_mem_pool);
20872
20873         return rc;
20874 }
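
/*
 * Illustrative sketch, not part of the driver: driving lpfc_wr_object()
 * in chunks, carrying @offset across calls the way a firmware download
 * path would. Populating the dmabuf list for each chunk is elided and
 * the helper is hypothetical.
 */
static int example_write_fw_image(struct lpfc_hba *phba, uint32_t image_size)
{
        LIST_HEAD(dmabuf_list);
        uint32_t offset = 0;
        int rc;

        while (offset < image_size) {
                /* (re)fill dmabuf_list with DMA buffers covering the
                 * bytes at the current offset; elided in this sketch
                 */
                rc = lpfc_wr_object(phba, &dmabuf_list,
                                    image_size - offset, &offset);
                if (rc)
                        return rc; /* negative errno, e.g. -ENXIO */
        }
        return 0;
}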
20875
20876 /**
20877  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20878  * @vport: pointer to vport data structure.
20879  *
20880  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20881  * and REG_VPI mailbox commands associated with the vport. This function
20882  * is called when the driver wants to restart discovery of the vport due to
20883  * a Clear Virtual Link event.
20884  **/
20885 void
20886 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20887 {
20888         struct lpfc_hba *phba = vport->phba;
20889         LPFC_MBOXQ_t *mb, *nextmb;
20890         struct lpfc_nodelist *ndlp;
20891         struct lpfc_nodelist *act_mbx_ndlp = NULL;
20892         LIST_HEAD(mbox_cmd_list);
20893         uint8_t restart_loop;
20894
20895         /* Clean up internally queued mailbox commands with the vport */
20896         spin_lock_irq(&phba->hbalock);
20897         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20898                 if (mb->vport != vport)
20899                         continue;
20900
20901                 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20902                         (mb->u.mb.mbxCommand != MBX_REG_VPI))
20903                         continue;
20904
20905                 list_move_tail(&mb->list, &mbox_cmd_list);
20906         }
20907         /* Clean up active mailbox command with the vport */
20908         mb = phba->sli.mbox_active;
20909         if (mb && (mb->vport == vport)) {
20910                 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20911                         (mb->u.mb.mbxCommand == MBX_REG_VPI))
20912                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20913                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20914                         act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20915
20916                         /* This reference is local to this routine.  The
20917                          * reference is removed at routine exit.
20918                          */
20919                         act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20920
20921                         /* Unregister the RPI when mailbox complete */
20922                         mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20923                 }
20924         }
20925         /* Cleanup any mailbox completions which are not yet processed */
20926         do {
20927                 restart_loop = 0;
20928                 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20929                         /*
20930                          * If this mailbox is already processed or it is
20931                          * for another vport, ignore it.
20932                          */
20933                         if ((mb->vport != vport) ||
20934                                 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20935                                 continue;
20936
20937                         if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20938                                 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20939                                 continue;
20940
20941                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20942                         if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20943                                 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20944                                 /* Unregister the RPI when mailbox complete */
20945                                 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20946                                 restart_loop = 1;
20947                                 spin_unlock_irq(&phba->hbalock);
20948                                 spin_lock(&ndlp->lock);
20949                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20950                                 spin_unlock(&ndlp->lock);
20951                                 spin_lock_irq(&phba->hbalock);
20952                                 break;
20953                         }
20954                 }
20955         } while (restart_loop);
20956
20957         spin_unlock_irq(&phba->hbalock);
20958
20959         /* Release the cleaned-up mailbox commands */
20960         while (!list_empty(&mbox_cmd_list)) {
20961                 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20962                 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20963                         ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20964                         mb->ctx_ndlp = NULL;
20965                         if (ndlp) {
20966                                 spin_lock(&ndlp->lock);
20967                                 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20968                                 spin_unlock(&ndlp->lock);
20969                                 lpfc_nlp_put(ndlp);
20970                         }
20971                 }
20972                 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
20973         }
20974
20975         /* Release the ndlp with the cleaned-up active mailbox command */
20976         if (act_mbx_ndlp) {
20977                 spin_lock(&act_mbx_ndlp->lock);
20978                 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20979                 spin_unlock(&act_mbx_ndlp->lock);
20980                 lpfc_nlp_put(act_mbx_ndlp);
20981         }
20982 }
20983
20984 /**
20985  * lpfc_drain_txq - Drain the txq
20986  * @phba: Pointer to HBA context object.
20987  *
20988  * This function attempts to submit IOCBs on the txq
20989  * to the adapter.  For SLI4 adapters, the txq contains
20990  * ELS IOCBs that have been deferred because there
20991  * are no SGLs.  This congestion can occur with large
20992  * vport counts during node discovery.
20993  **/
20994
20995 uint32_t
20996 lpfc_drain_txq(struct lpfc_hba *phba)
20997 {
20998         LIST_HEAD(completions);
20999         struct lpfc_sli_ring *pring;
21000         struct lpfc_iocbq *piocbq = NULL;
21001         unsigned long iflags = 0;
21002         char *fail_msg = NULL;
21003         uint32_t txq_cnt = 0;
21004         struct lpfc_queue *wq;
21005         int ret = 0;
21006
21007         if (phba->link_flag & LS_MDS_LOOPBACK) {
21008                 /* MDS WQEs are posted only to the first WQ */
21009                 wq = phba->sli4_hba.hdwq[0].io_wq;
21010                 if (unlikely(!wq))
21011                         return 0;
21012                 pring = wq->pring;
21013         } else {
21014                 wq = phba->sli4_hba.els_wq;
21015                 if (unlikely(!wq))
21016                         return 0;
21017                 pring = lpfc_phba_elsring(phba);
21018         }
21019
21020         if (unlikely(!pring) || list_empty(&pring->txq))
21021                 return 0;
21022
21023         spin_lock_irqsave(&pring->ring_lock, iflags);
21024         list_for_each_entry(piocbq, &pring->txq, list) {
21025                 txq_cnt++;
21026         }
21027
21028         if (txq_cnt > pring->txq_max)
21029                 pring->txq_max = txq_cnt;
21030
21031         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21032
21033         while (!list_empty(&pring->txq)) {
21034                 spin_lock_irqsave(&pring->ring_lock, iflags);
21035
21036                 piocbq = lpfc_sli_ringtx_get(phba, pring);
21037                 if (!piocbq) {
21038                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21039                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21040                                 "2823 txq empty and txq_cnt is %d\n",
21041                                 txq_cnt);
21042                         break;
21043                 }
21044                 txq_cnt--;
21045
21046                 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21047
21048                 if (ret && ret != IOCB_BUSY) {
21049                         fail_msg = " - Cannot send IO ";
21050                         piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21051                 }
21052                 if (fail_msg) {
21053                         piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21054                         /* Failed means we can't issue and need to cancel */
21055                         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21056                                         "2822 IOCB failed %s iotag 0x%x "
21057                                         "xri 0x%x %d flg x%x\n",
21058                                         fail_msg, piocbq->iotag,
21059                                         piocbq->sli4_xritag, ret,
21060                                         piocbq->cmd_flag);
21061                         list_add_tail(&piocbq->list, &completions);
21062                         fail_msg = NULL;
21063                 }
21064                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21065                 if (txq_cnt == 0 || ret == IOCB_BUSY)
21066                         break;
21067         }
21068         /* Cancel all the IOCBs that cannot be issued */
21069         lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21070                               IOERR_SLI_ABORTED);
21071
21072         return txq_cnt;
21073 }
21074
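/* Editor's note: a hypothetical sketch, not part of the driver, showing how
 * a caller might retry deferred ELS traffic once SGLs are freed. The
 * example_* name and the "0000" message ID are assumptions.
 */
static void example_retry_deferred_els(struct lpfc_hba *phba)
{
        uint32_t remaining;

        /* Push deferred txq entries back toward the adapter */
        remaining = lpfc_drain_txq(phba);
        if (remaining)
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "0000 %u IOCBs still deferred on txq\n",
                                remaining);
}
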
21075 /**
21076  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21077  * @phba: Pointer to HBA context object.
21078  * @pwqeq: Pointer to command WQE.
21079  * @sglq: Pointer to the scatter gather queue object.
21080  *
21081  * This routine converts the bpl or bde that is in the WQE
21082  * to a sgl list for the sli4 hardware. The physical address
21083  * of the bpl/bde is converted back to a virtual address.
21084  * If the WQE contains a BPL then the list of BDEs is
21085  * converted to sli4_sges. If the WQE contains a single
21086  * BDE then it is converted to a single sli4_sge.
21087  * The WQE is still in cpu endianness so the contents of
21088  * the bpl can be used without byte swapping.
21089  *
21090  * Returns valid XRI = Success, NO_XRI = Failure.
21091  */
21092 static uint16_t
21093 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21094                  struct lpfc_sglq *sglq)
21095 {
21096         uint16_t xritag = NO_XRI;
21097         struct ulp_bde64 *bpl = NULL;
21098         struct ulp_bde64 bde;
21099         struct sli4_sge *sgl  = NULL;
21100         struct lpfc_dmabuf *dmabuf;
21101         union lpfc_wqe128 *wqe;
21102         int numBdes = 0;
21103         int i = 0;
21104         uint32_t offset = 0; /* accumulated offset in the sg request list */
21105         int inbound = 0; /* number of sg reply entries inbound from firmware */
21106         uint32_t cmd;
21107
21108         if (!pwqeq || !sglq)
21109                 return xritag;
21110
21111         sgl  = (struct sli4_sge *)sglq->sgl;
21112         wqe = &pwqeq->wqe;
21113         pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21114
21115         cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21116         if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21117                 return sglq->sli4_xritag;
21118         numBdes = pwqeq->num_bdes;
21119         if (numBdes) {
21120                 /* The addrHigh and addrLow fields within the WQE
21121                  * have not been byteswapped yet so there is no
21122                  * need to swap them back.
21123                  */
21124                 if (pwqeq->bpl_dmabuf)
21125                         dmabuf = pwqeq->bpl_dmabuf;
21126                 else
21127                         return xritag;
21128
21129                 bpl  = (struct ulp_bde64 *)dmabuf->virt;
21130                 if (!bpl)
21131                         return xritag;
21132
21133                 for (i = 0; i < numBdes; i++) {
21134                         /* Should already be byte swapped. */
21135                         sgl->addr_hi = bpl->addrHigh;
21136                         sgl->addr_lo = bpl->addrLow;
21137
21138                         sgl->word2 = le32_to_cpu(sgl->word2);
21139                         if ((i+1) == numBdes)
21140                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
21141                         else
21142                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
21143                         /* swap the size field back to the cpu so we
21144                          * can assign it to the sgl.
21145                          */
21146                         bde.tus.w = le32_to_cpu(bpl->tus.w);
21147                         sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21148                         /* The offsets in the sgl need to be accumulated
21149                          * separately for the request and reply lists.
21150                          * The request is always first, the reply follows.
21151                          */
21152                         switch (cmd) {
21153                         case CMD_GEN_REQUEST64_WQE:
21154                                 /* add up the reply sg entries */
21155                                 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21156                                         inbound++;
21157                                 /* first inbound? reset the offset */
21158                                 if (inbound == 1)
21159                                         offset = 0;
21160                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21161                                 bf_set(lpfc_sli4_sge_type, sgl,
21162                                         LPFC_SGE_TYPE_DATA);
21163                                 offset += bde.tus.f.bdeSize;
21164                                 break;
21165                         case CMD_FCP_TRSP64_WQE:
21166                                 bf_set(lpfc_sli4_sge_offset, sgl, 0);
21167                                 bf_set(lpfc_sli4_sge_type, sgl,
21168                                         LPFC_SGE_TYPE_DATA);
21169                                 break;
21170                         case CMD_FCP_TSEND64_WQE:
21171                         case CMD_FCP_TRECEIVE64_WQE:
21172                                 bf_set(lpfc_sli4_sge_type, sgl,
21173                                         bpl->tus.f.bdeFlags);
21174                                 if (i < 3)
21175                                         offset = 0;
21176                                 else
21177                                         offset += bde.tus.f.bdeSize;
21178                                 bf_set(lpfc_sli4_sge_offset, sgl, offset);
21179                                 break;
21180                         }
21181                         sgl->word2 = cpu_to_le32(sgl->word2);
21182                         bpl++;
21183                         sgl++;
21184                 }
21185         } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21186                 /* The addrHigh and addrLow fields of the BDE have not
21187                  * been byteswapped yet so they need to be swapped
21188                  * before putting them in the sgl.
21189                  */
21190                 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21191                 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21192                 sgl->word2 = le32_to_cpu(sgl->word2);
21193                 bf_set(lpfc_sli4_sge_last, sgl, 1);
21194                 sgl->word2 = cpu_to_le32(sgl->word2);
21195                 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21196         }
21197         return sglq->sli4_xritag;
21198 }
21199
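/* Editor's note: an illustrative sketch (not driver code) of the loop shape
 * used above when converting an array of entries to SGEs - bring word2 to
 * CPU order, mark only the final entry as "last", then restore little
 * endian. The example_* name is an assumption.
 */
static void example_mark_last_sge(struct sli4_sge *sgl, int num_sge)
{
        int i;

        for (i = 0; i < num_sge; i++, sgl++) {
                sgl->word2 = le32_to_cpu(sgl->word2);
                bf_set(lpfc_sli4_sge_last, sgl, (i + 1) == num_sge ? 1 : 0);
                sgl->word2 = cpu_to_le32(sgl->word2);
        }
}
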
21200 /**
21201  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21202  * @phba: Pointer to HBA context object.
21203  * @qp: Pointer to HDW queue.
21204  * @pwqe: Pointer to command WQE.
21205  **/
21206 int
21207 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21208                     struct lpfc_iocbq *pwqe)
21209 {
21210         union lpfc_wqe128 *wqe = &pwqe->wqe;
21211         struct lpfc_async_xchg_ctx *ctxp;
21212         struct lpfc_queue *wq;
21213         struct lpfc_sglq *sglq;
21214         struct lpfc_sli_ring *pring;
21215         unsigned long iflags;
21216         uint32_t ret = 0;
21217
21218         /* NVME_LS and NVME_LS ABTS requests. */
21219         if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21220                 pring =  phba->sli4_hba.nvmels_wq->pring;
21221                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21222                                           qp, wq_access);
21223                 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21224                 if (!sglq) {
21225                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21226                         return WQE_BUSY;
21227                 }
21228                 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21229                 pwqe->sli4_xritag = sglq->sli4_xritag;
21230                 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21231                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21232                         return WQE_ERROR;
21233                 }
21234                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21235                        pwqe->sli4_xritag);
21236                 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21237                 if (ret) {
21238                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21239                         return ret;
21240                 }
21241
21242                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21243                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21244
21245                 lpfc_sli4_poll_eq(qp->hba_eq);
21246                 return 0;
21247         }
21248
21249         /* NVME_FCREQ and NVME_ABTS requests */
21250         if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21251                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21252                 wq = qp->io_wq;
21253                 pring = wq->pring;
21254
21255                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21256
21257                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21258                                           qp, wq_access);
21259                 ret = lpfc_sli4_wq_put(wq, wqe);
21260                 if (ret) {
21261                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21262                         return ret;
21263                 }
21264                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21265                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21266
21267                 lpfc_sli4_poll_eq(qp->hba_eq);
21268                 return 0;
21269         }
21270
21271         /* NVMET requests */
21272         if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21273                 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21274                 wq = qp->io_wq;
21275                 pring = wq->pring;
21276
21277                 ctxp = pwqe->context_un.axchg;
21278                 sglq = ctxp->ctxbuf->sglq;
21279                 if (pwqe->sli4_xritag ==  NO_XRI) {
21280                         pwqe->sli4_lxritag = sglq->sli4_lxritag;
21281                         pwqe->sli4_xritag = sglq->sli4_xritag;
21282                 }
21283                 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21284                        pwqe->sli4_xritag);
21285                 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21286
21287                 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21288                                           qp, wq_access);
21289                 ret = lpfc_sli4_wq_put(wq, wqe);
21290                 if (ret) {
21291                         spin_unlock_irqrestore(&pring->ring_lock, iflags);
21292                         return ret;
21293                 }
21294                 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21295                 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21296
21297                 lpfc_sli4_poll_eq(qp->hba_eq);
21298                 return 0;
21299         }
21300         return WQE_ERROR;
21301 }
21302
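/* Editor's note: a hypothetical call pattern for posting a prepared command
 * WQE on its hardware queue; example_post_wqe() is an assumption, not a
 * driver symbol. WQE_BUSY signals backpressure (no SGL or WQ full) that a
 * caller may retry later.
 */
static int example_post_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *pwqe)
{
        struct lpfc_sli4_hdw_queue *qp;
        int rc;

        qp = &phba->sli4_hba.hdwq[pwqe->hba_wqidx];
        rc = lpfc_sli4_issue_wqe(phba, qp, pwqe);
        if (rc == WQE_BUSY)
                return -EBUSY;          /* retryable: queue resources busy */
        return rc ? -EIO : 0;
}
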
21303 /**
21304  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21305  * @phba: Pointer to HBA context object.
21306  * @cmdiocb: Pointer to driver command iocb object.
21307  * @cmpl: completion function.
21308  *
21309  * Fill the appropriate fields for the abort WQE and call
21310  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21311  * This function is called with hbalock held and no ring_lock held.
21312  *
21313  * RETURNS 0 - SUCCESS
21314  **/
21315
21316 int
21317 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21318                             void *cmpl)
21319 {
21320         struct lpfc_vport *vport = cmdiocb->vport;
21321         struct lpfc_iocbq *abtsiocb = NULL;
21322         union lpfc_wqe128 *abtswqe;
21323         struct lpfc_io_buf *lpfc_cmd;
21324         int retval = IOCB_ERROR;
21325         u16 xritag = cmdiocb->sli4_xritag;
21326
21327         /*
21328          * The SCSI command cannot be in the txq; it is in flight because
21329          * pCmd still points at the SCSI command we have to abort. There
21330          * is no need to search the txcmplq. Just send an abort to the FW.
21331          */
21332
21333         abtsiocb = __lpfc_sli_get_iocbq(phba);
21334         if (!abtsiocb)
21335                 return WQE_NORESOURCE;
21336
21337         /* Indicate the IO is being aborted by the driver. */
21338         cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21339
21340         abtswqe = &abtsiocb->wqe;
21341         memset(abtswqe, 0, sizeof(*abtswqe));
21342
21343         if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21344                 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21345         bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21346         abtswqe->abort_cmd.rsrvd5 = 0;
21347         abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21348         bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21349         bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21350         bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21351         bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21352         bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21353         bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21354
21355         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21356         abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21357         abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21358         if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21359                 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21360         if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21361                 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21362         if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21363                 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21364         abtsiocb->vport = vport;
21365         abtsiocb->cmd_cmpl = cmpl;
21366
21367         lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21368         retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21369
21370         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21371                          "0359 Abort xri x%x, original iotag x%x, "
21372                          "abort cmd iotag x%x retval x%x\n",
21373                          xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21374
21375         if (retval) {
21376                 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21377                 __lpfc_sli_release_iocbq(phba, abtsiocb);
21378         }
21379
21380         return retval;
21381 }
21382
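/* Editor's note: a hypothetical sketch of issuing an abort with a
 * caller-supplied completion; example_* names are assumptions. Real
 * completions (e.g. for FCP) release the abort iocbq the same way.
 */
static void example_abort_cmpl(struct lpfc_hba *phba,
                               struct lpfc_iocbq *cmdiocb,
                               struct lpfc_iocbq *rspiocb)
{
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "0000 example abort done, iotag x%x\n",
                        cmdiocb->iotag);
        lpfc_sli_release_iocbq(phba, cmdiocb);
}

static int example_abort_io(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
        /* per the header above, hbalock must be held by the caller */
        return lpfc_sli4_issue_abort_iotag(phba, cmdiocb, example_abort_cmpl);
}
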
21383 #ifdef LPFC_MXP_STAT
21384 /**
21385  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21386  * @phba: pointer to lpfc hba data structure.
21387  * @hwqid: index of the HWQ to operate on.
21388  *
21389  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21390  * 15 seconds after a test case starts running.
21391  *
21392  * The user should call lpfc_debugfs_multixripools_write before running a test
21393  * case to clear stat_snapshot_taken. Then the user starts a test case. While
21394  * the test case is running, stat_snapshot_taken is incremented by 1 each time
21395  * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21396  * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21397  **/
21398 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21399 {
21400         struct lpfc_sli4_hdw_queue *qp;
21401         struct lpfc_multixri_pool *multixri_pool;
21402         struct lpfc_pvt_pool *pvt_pool;
21403         struct lpfc_pbl_pool *pbl_pool;
21404         u32 txcmplq_cnt;
21405
21406         qp = &phba->sli4_hba.hdwq[hwqid];
21407         multixri_pool = qp->p_multixri_pool;
21408         if (!multixri_pool)
21409                 return;
21410
21411         if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21412                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21413                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21414                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21415
21416                 multixri_pool->stat_pbl_count = pbl_pool->count;
21417                 multixri_pool->stat_pvt_count = pvt_pool->count;
21418                 multixri_pool->stat_busy_count = txcmplq_cnt;
21419         }
21420
21421         multixri_pool->stat_snapshot_taken++;
21422 }
21423 #endif
21424
21425 /**
21426  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21427  * @phba: pointer to lpfc hba data structure.
21428  * @hwqid: index of the HWQ to operate on.
21429  *
21430  * This routine moves some XRIs from private to public pool when private pool
21431  * is not busy.
21432  **/
21433 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21434 {
21435         struct lpfc_multixri_pool *multixri_pool;
21436         u32 io_req_count;
21437         u32 prev_io_req_count;
21438
21439         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21440         if (!multixri_pool)
21441                 return;
21442         io_req_count = multixri_pool->io_req_count;
21443         prev_io_req_count = multixri_pool->prev_io_req_count;
21444
21445         if (prev_io_req_count != io_req_count) {
21446                 /* Private pool is busy */
21447                 multixri_pool->prev_io_req_count = io_req_count;
21448         } else {
21449                 /* Private pool is not busy.
21450                  * Move XRIs from private to public pool.
21451                  */
21452                 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21453         }
21454 }
21455
21456 /**
21457  * lpfc_adjust_high_watermark - Adjust high watermark
21458  * @phba: pointer to lpfc hba data structure.
21459  * @hwqid: index of the HWQ to operate on.
21460  *
21461  * This routine sets the high watermark to the number of outstanding XRIs,
21462  * clamped so that the new value stays between xri_limit/2 and xri_limit.
21463  **/
21464 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21465 {
21466         u32 new_watermark;
21467         u32 watermark_max;
21468         u32 watermark_min;
21469         u32 xri_limit;
21470         u32 txcmplq_cnt;
21471         u32 abts_io_bufs;
21472         struct lpfc_multixri_pool *multixri_pool;
21473         struct lpfc_sli4_hdw_queue *qp;
21474
21475         qp = &phba->sli4_hba.hdwq[hwqid];
21476         multixri_pool = qp->p_multixri_pool;
21477         if (!multixri_pool)
21478                 return;
21479         xri_limit = multixri_pool->xri_limit;
21480
21481         watermark_max = xri_limit;
21482         watermark_min = xri_limit / 2;
21483
21484         txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21485         abts_io_bufs = qp->abts_scsi_io_bufs;
21486         abts_io_bufs += qp->abts_nvme_io_bufs;
21487
21488         new_watermark = txcmplq_cnt + abts_io_bufs;
21489         new_watermark = min(watermark_max, new_watermark);
21490         new_watermark = max(watermark_min, new_watermark);
21491         multixri_pool->pvt_pool.high_watermark = new_watermark;
21492
21493 #ifdef LPFC_MXP_STAT
21494         multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21495                                           new_watermark);
21496 #endif
21497 }
21498
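/* Editor's note: the min()/max() pair above is equivalent to the kernel's
 * clamp() helper, restated here as an illustrative sketch. For example,
 * with xri_limit = 512, 700 outstanding XRIs clamp down to 512 and 100
 * outstanding clamp up to 256.
 */
static u32 example_high_watermark(u32 outstanding, u32 xri_limit)
{
        return clamp(outstanding, xri_limit / 2, xri_limit);
}
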
21499 /**
21500  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21501  * @phba: pointer to lpfc hba data structure.
21502  * @hwqid: index of the HWQ to operate on.
21503  *
21504  * This routine is called from the heartbeat timer when pvt_pool is idle.
21505  * All free XRIs are moved from private to public pool on hwqid in two steps.
21506  * The first step moves (all - low_watermark) amount of XRIs.
21507  * The second step moves the rest of XRIs.
21508  **/
21509 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21510 {
21511         struct lpfc_pbl_pool *pbl_pool;
21512         struct lpfc_pvt_pool *pvt_pool;
21513         struct lpfc_sli4_hdw_queue *qp;
21514         struct lpfc_io_buf *lpfc_ncmd;
21515         struct lpfc_io_buf *lpfc_ncmd_next;
21516         unsigned long iflag;
21517         struct list_head tmp_list;
21518         u32 tmp_count;
21519
21520         qp = &phba->sli4_hba.hdwq[hwqid];
21521         pbl_pool = &qp->p_multixri_pool->pbl_pool;
21522         pvt_pool = &qp->p_multixri_pool->pvt_pool;
21523         tmp_count = 0;
21524
21525         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21526         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21527
21528         if (pvt_pool->count > pvt_pool->low_watermark) {
21529                 /* Step 1: move (all - low_watermark) from pvt_pool
21530                  * to pbl_pool
21531                  */
21532
21533                 /* Move low watermark of bufs from pvt_pool to tmp_list */
21534                 INIT_LIST_HEAD(&tmp_list);
21535                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21536                                          &pvt_pool->list, list) {
21537                         list_move_tail(&lpfc_ncmd->list, &tmp_list);
21538                         tmp_count++;
21539                         if (tmp_count >= pvt_pool->low_watermark)
21540                                 break;
21541                 }
21542
21543                 /* Move all bufs from pvt_pool to pbl_pool */
21544                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21545
21546                 /* Move all bufs from tmp_list to pvt_pool */
21547                 list_splice(&tmp_list, &pvt_pool->list);
21548
21549                 pbl_pool->count += (pvt_pool->count - tmp_count);
21550                 pvt_pool->count = tmp_count;
21551         } else {
21552                 /* Step 2: move the rest from pvt_pool to pbl_pool */
21553                 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21554                 pbl_pool->count += pvt_pool->count;
21555                 pvt_pool->count = 0;
21556         }
21557
21558         spin_unlock(&pvt_pool->lock);
21559         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21560 }
21561
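/* Editor's note: an illustrative sketch of the list trick used in step 1
 * above - peel the first "keep" entries onto a temporary list, splice the
 * remainder to the destination, then splice the kept entries back. The
 * example_* name is an assumption; locking is left to the caller here.
 */
static u32 example_keep_n_move_rest(struct list_head *src,
                                    struct list_head *dst, u32 keep)
{
        struct lpfc_io_buf *buf, *buf_next;
        LIST_HEAD(tmp_list);
        u32 kept = 0;

        list_for_each_entry_safe(buf, buf_next, src, list) {
                list_move_tail(&buf->list, &tmp_list);
                if (++kept >= keep)
                        break;
        }
        list_splice_init(src, dst);     /* move everything left over */
        list_splice(&tmp_list, src);    /* restore the kept entries */
        return kept;
}
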
21562 /**
21563  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21564  * @phba: pointer to lpfc hba data structure
21565  * @qp: pointer to HDW queue
21566  * @pbl_pool: specified public free XRI pool
21567  * @pvt_pool: specified private free XRI pool
21568  * @count: number of XRIs to move
21569  *
21570  * This routine tries to move some free common bufs from the specified pbl_pool
21571  * to the specified pvt_pool. It might move fewer than count XRIs if there are
21572  * not enough in the public pool.
21573  *
21574  * Return:
21575  *   true - if XRIs are successfully moved from the specified pbl_pool to the
21576  *          specified pvt_pool
21577  *   false - if the specified pbl_pool is empty or locked by someone else
21578  **/
21579 static bool
21580 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21581                           struct lpfc_pbl_pool *pbl_pool,
21582                           struct lpfc_pvt_pool *pvt_pool, u32 count)
21583 {
21584         struct lpfc_io_buf *lpfc_ncmd;
21585         struct lpfc_io_buf *lpfc_ncmd_next;
21586         unsigned long iflag;
21587         int ret;
21588
21589         ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21590         if (ret) {
21591                 if (pbl_pool->count) {
21592                         /* Move a batch of XRIs from public to private pool */
21593                         lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21594                         list_for_each_entry_safe(lpfc_ncmd,
21595                                                  lpfc_ncmd_next,
21596                                                  &pbl_pool->list,
21597                                                  list) {
21598                                 list_move_tail(&lpfc_ncmd->list,
21599                                                &pvt_pool->list);
21600                                 pvt_pool->count++;
21601                                 pbl_pool->count--;
21602                                 count--;
21603                                 if (count == 0)
21604                                         break;
21605                         }
21606
21607                         spin_unlock(&pvt_pool->lock);
21608                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21609                         return true;
21610                 }
21611                 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21612         }
21613
21614         return false;
21615 }
21616
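/* Editor's note: a stripped-down sketch of the opportunistic locking shape
 * above. spin_trylock_irqsave() avoids spinning on a public pool that
 * another CPU is already rebalancing; on failure the caller simply moves
 * on to the next pool. The example_* name is an assumption.
 */
static bool example_try_take_one(spinlock_t *lock, u32 *count)
{
        unsigned long iflag;
        bool taken = false;

        if (spin_trylock_irqsave(lock, iflag)) {
                if (*count) {
                        (*count)--;
                        taken = true;
                }
                spin_unlock_irqrestore(lock, iflag);
        }
        return taken;
}
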
21617 /**
21618  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21619  * @phba: pointer to lpfc hba data structure.
21620  * @hwqid: index of the HWQ to operate on.
21621  * @count: number of XRIs to move
21622  *
21623  * This routine tries to find some free common bufs in one of the public pools
21624  * using a round-robin method. The search starts from the local hwqid, then
21625  * continues from the HWQ found last time (rrb_next_hwqid). Once a public pool
21626  * is found, a batch of free common bufs is moved to the private pool on hwqid.
21627  * It might move fewer than count XRIs if there are not enough in the public pool.
21628  **/
21629 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21630 {
21631         struct lpfc_multixri_pool *multixri_pool;
21632         struct lpfc_multixri_pool *next_multixri_pool;
21633         struct lpfc_pvt_pool *pvt_pool;
21634         struct lpfc_pbl_pool *pbl_pool;
21635         struct lpfc_sli4_hdw_queue *qp;
21636         u32 next_hwqid;
21637         u32 hwq_count;
21638         int ret;
21639
21640         qp = &phba->sli4_hba.hdwq[hwqid];
21641         multixri_pool = qp->p_multixri_pool;
21642         pvt_pool = &multixri_pool->pvt_pool;
21643         pbl_pool = &multixri_pool->pbl_pool;
21644
21645         /* Check if local pbl_pool is available */
21646         ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21647         if (ret) {
21648 #ifdef LPFC_MXP_STAT
21649                 multixri_pool->local_pbl_hit_count++;
21650 #endif
21651                 return;
21652         }
21653
21654         hwq_count = phba->cfg_hdw_queue;
21655
21656         /* Get the next hwqid which was found last time */
21657         next_hwqid = multixri_pool->rrb_next_hwqid;
21658
21659         do {
21660                 /* Go to next hwq */
21661                 next_hwqid = (next_hwqid + 1) % hwq_count;
21662
21663                 next_multixri_pool =
21664                         phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21665                 pbl_pool = &next_multixri_pool->pbl_pool;
21666
21667                 /* Check if the public free xri pool is available */
21668                 ret = _lpfc_move_xri_pbl_to_pvt(
21669                         phba, qp, pbl_pool, pvt_pool, count);
21670
21671                 /* Exit while-loop on success or when all hwqids are checked */
21672         } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21673
21674         /* Starting point for the next time */
21675         multixri_pool->rrb_next_hwqid = next_hwqid;
21676
21677         if (!ret) {
21678                 /* stats: all public pools are empty */
21679                 multixri_pool->pbl_empty_count++;
21680         }
21681
21682 #ifdef LPFC_MXP_STAT
21683         if (ret) {
21684                 if (next_hwqid == hwqid)
21685                         multixri_pool->local_pbl_hit_count++;
21686                 else
21687                         multixri_pool->other_pbl_hit_count++;
21688         }
21689 #endif
21690 }
21691
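/* Editor's note: the round-robin walk above in generic form, as an
 * illustrative sketch. example_try() stands in for the per-pool attempt
 * (_lpfc_move_xri_pbl_to_pvt above); the walk stops on success or after
 * one full lap, and the return value seeds the next search.
 */
static u32 example_rr_search(u32 start, u32 hwq_count, bool (*example_try)(u32))
{
        u32 next = start;

        do {
                next = (next + 1) % hwq_count;
                if (example_try(next))
                        break;          /* found a pool with free XRIs */
        } while (next != start);        /* give up after one full lap */

        return next;                    /* starting point for next time */
}
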
21692 /**
21693  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21694  * @phba: pointer to lpfc hba data structure.
21695  * @hwqid: index of the HWQ to operate on.
21696  *
21697  * This routine gets a batch of XRIs from pbl_pool if pvt_pool's count is
21698  * below the low watermark.
21699  **/
21700 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21701 {
21702         struct lpfc_multixri_pool *multixri_pool;
21703         struct lpfc_pvt_pool *pvt_pool;
21704
21705         multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21706         pvt_pool = &multixri_pool->pvt_pool;
21707
21708         if (pvt_pool->count < pvt_pool->low_watermark)
21709                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21710 }
21711
21712 /**
21713  * lpfc_release_io_buf - Return one IO buf back to free pool
21714  * @phba: pointer to lpfc hba data structure.
21715  * @lpfc_ncmd: IO buf to be returned.
21716  * @qp: the HWQ that the IO buf belongs to.
21717  *
21718  * This routine returns one IO buf back to free pool. If this is an urgent IO,
21719  * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21720  * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21721  * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
21722  * lpfc_io_buf_list_put.
21723  **/
21724 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21725                          struct lpfc_sli4_hdw_queue *qp)
21726 {
21727         unsigned long iflag;
21728         struct lpfc_pbl_pool *pbl_pool;
21729         struct lpfc_pvt_pool *pvt_pool;
21730         struct lpfc_epd_pool *epd_pool;
21731         u32 txcmplq_cnt;
21732         u32 xri_owned;
21733         u32 xri_limit;
21734         u32 abts_io_bufs;
21735
21736         /* MUST zero fields if buffer is reused by another protocol */
21737         lpfc_ncmd->nvmeCmd = NULL;
21738         lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21739
21740         if (phba->cfg_xpsgl && !phba->nvmet_support &&
21741             !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21742                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21743
21744         if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21745                 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21746
21747         if (phba->cfg_xri_rebalancing) {
21748                 if (lpfc_ncmd->expedite) {
21749                         /* Return to expedite pool */
21750                         epd_pool = &phba->epd_pool;
21751                         spin_lock_irqsave(&epd_pool->lock, iflag);
21752                         list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21753                         epd_pool->count++;
21754                         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21755                         return;
21756                 }
21757
21758                 /* Avoid invalid access if an IO sneaks in and is being rejected
21759                  * just _after_ xri pools are destroyed in lpfc_offline.
21760                  * Nothing much can be done at this point.
21761                  */
21762                 if (!qp->p_multixri_pool)
21763                         return;
21764
21765                 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21766                 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21767
21768                 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21769                 abts_io_bufs = qp->abts_scsi_io_bufs;
21770                 abts_io_bufs += qp->abts_nvme_io_bufs;
21771
21772                 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21773                 xri_limit = qp->p_multixri_pool->xri_limit;
21774
21775 #ifdef LPFC_MXP_STAT
21776                 if (xri_owned <= xri_limit)
21777                         qp->p_multixri_pool->below_limit_count++;
21778                 else
21779                         qp->p_multixri_pool->above_limit_count++;
21780 #endif
21781
21782                 /* XRI goes to either public or private free xri pool
21783                  *     based on watermark and xri_limit
21784                  */
21785                 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21786                     (xri_owned < xri_limit &&
21787                      pvt_pool->count < pvt_pool->high_watermark)) {
21788                         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21789                                                   qp, free_pvt_pool);
21790                         list_add_tail(&lpfc_ncmd->list,
21791                                       &pvt_pool->list);
21792                         pvt_pool->count++;
21793                         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21794                 } else {
21795                         lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21796                                                   qp, free_pub_pool);
21797                         list_add_tail(&lpfc_ncmd->list,
21798                                       &pbl_pool->list);
21799                         pbl_pool->count++;
21800                         spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21801                 }
21802         } else {
21803                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21804                                           qp, free_xri);
21805                 list_add_tail(&lpfc_ncmd->list,
21806                               &qp->lpfc_io_buf_list_put);
21807                 qp->put_io_bufs++;
21808                 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21809                                        iflag);
21810         }
21811 }
21812
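/* Editor's note: the private-vs-public routing test above, restated as a
 * hypothetical worked example. With low_watermark = 16, high_watermark =
 * 256 and xri_limit = 512: a buffer freed at pvt count 10 goes private
 * (below the low watermark); one freed at pvt count 300 goes public even
 * when xri_owned < xri_limit.
 */
static bool example_route_to_private(u32 pvt_count, u32 low_wm, u32 high_wm,
                                     u32 xri_owned, u32 xri_limit)
{
        return pvt_count < low_wm ||
               (xri_owned < xri_limit && pvt_count < high_wm);
}
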
21813 /**
21814  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21815  * @phba: pointer to lpfc hba data structure.
21816  * @qp: pointer to HDW queue
21817  * @pvt_pool: pointer to private pool data structure.
21818  * @ndlp: pointer to lpfc nodelist data structure.
21819  *
21820  * This routine tries to get one free IO buf from private pool.
21821  *
21822  * Return:
21823  *   pointer to one free IO buf - if private pool is not empty
21824  *   NULL - if private pool is empty
21825  **/
21826 static struct lpfc_io_buf *
21827 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21828                                   struct lpfc_sli4_hdw_queue *qp,
21829                                   struct lpfc_pvt_pool *pvt_pool,
21830                                   struct lpfc_nodelist *ndlp)
21831 {
21832         struct lpfc_io_buf *lpfc_ncmd;
21833         struct lpfc_io_buf *lpfc_ncmd_next;
21834         unsigned long iflag;
21835
21836         lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21837         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21838                                  &pvt_pool->list, list) {
21839                 if (lpfc_test_rrq_active(
21840                         phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21841                         continue;
21842                 list_del(&lpfc_ncmd->list);
21843                 pvt_pool->count--;
21844                 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21845                 return lpfc_ncmd;
21846         }
21847         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21848
21849         return NULL;
21850 }
21851
21852 /**
21853  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21854  * @phba: pointer to lpfc hba data structure.
21855  *
21856  * This routine tries to get one free IO buf from expedite pool.
21857  *
21858  * Return:
21859  *   pointer to one free IO buf - if expedite pool is not empty
21860  *   NULL - if expedite pool is empty
21861  **/
21862 static struct lpfc_io_buf *
21863 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21864 {
21865         struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
21866         struct lpfc_io_buf *lpfc_ncmd_next;
21867         unsigned long iflag;
21868         struct lpfc_epd_pool *epd_pool;
21869
21870         epd_pool = &phba->epd_pool;
21871
21872         spin_lock_irqsave(&epd_pool->lock, iflag);
21873         if (epd_pool->count > 0) {
21874                 list_for_each_entry_safe(iter, lpfc_ncmd_next,
21875                                          &epd_pool->list, list) {
21876                         list_del(&iter->list);
21877                         epd_pool->count--;
21878                         lpfc_ncmd = iter;
21879                         break;
21880                 }
21881         }
21882         spin_unlock_irqrestore(&epd_pool->lock, iflag);
21883
21884         return lpfc_ncmd;
21885 }
21886
21887 /**
21888  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21889  * @phba: pointer to lpfc hba data structure.
21890  * @ndlp: pointer to lpfc nodelist data structure.
21891  * @hwqid: index of the HWQ to operate on.
21892  * @expedite: 1 means this request is urgent.
21893  *
21894  * This routine will do the following actions and then return a pointer to
21895  * one free IO buf.
21896  *
21897  * 1. If private free xri count is empty, move some XRIs from public to
21898  *    private pool.
21899  * 2. Get one XRI from private free xri pool.
21900  * 3. If we fail to get one from pvt_pool and this is an expedite request,
21901  *    get one free xri from expedite pool.
21902  *
21903  * Note: ndlp is only used on SCSI side for RRQ testing.
21904  *       The caller should pass NULL for ndlp on NVME side.
21905  *
21906  * Return:
21907  *   pointer to one free IO buf - if one is available
21908  *   NULL - if no IO buf is available
21909  **/
21910 static struct lpfc_io_buf *
21911 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21912                                     struct lpfc_nodelist *ndlp,
21913                                     int hwqid, int expedite)
21914 {
21915         struct lpfc_sli4_hdw_queue *qp;
21916         struct lpfc_multixri_pool *multixri_pool;
21917         struct lpfc_pvt_pool *pvt_pool;
21918         struct lpfc_io_buf *lpfc_ncmd;
21919
21920         qp = &phba->sli4_hba.hdwq[hwqid];
21921         lpfc_ncmd = NULL;
21922         if (!qp) {
21923                 lpfc_printf_log(phba, KERN_INFO,
21924                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21925                                 "5556 NULL qp for hwqid  x%x\n", hwqid);
21926                 return lpfc_ncmd;
21927         }
21928         multixri_pool = qp->p_multixri_pool;
21929         if (!multixri_pool) {
21930                 lpfc_printf_log(phba, KERN_INFO,
21931                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21932                                 "5557 NULL multixri for hwqid  x%x\n", hwqid);
21933                 return lpfc_ncmd;
21934         }
21935         pvt_pool = &multixri_pool->pvt_pool;
21936         if (!pvt_pool) {
21937                 lpfc_printf_log(phba, KERN_INFO,
21938                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21939                                 "5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
21940                 return lpfc_ncmd;
21941         }
21942         multixri_pool->io_req_count++;
21943
21944         /* If pvt_pool is empty, move some XRIs from public to private pool */
21945         if (pvt_pool->count == 0)
21946                 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21947
21948         /* Get one XRI from private free xri pool */
21949         lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21950
21951         if (lpfc_ncmd) {
21952                 lpfc_ncmd->hdwq = qp;
21953                 lpfc_ncmd->hdwq_no = hwqid;
21954         } else if (expedite) {
21955                 /* If we fail to get one from pvt_pool and this is an expedite
21956                  * request, get one free xri from expedite pool.
21957                  */
21958                 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21959         }
21960
21961         return lpfc_ncmd;
21962 }
21963
21964 static inline struct lpfc_io_buf *
21965 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21966 {
21967         struct lpfc_sli4_hdw_queue *qp;
21968         struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21969
21970         qp = &phba->sli4_hba.hdwq[idx];
21971         list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21972                                  &qp->lpfc_io_buf_list_get, list) {
21973                 if (lpfc_test_rrq_active(phba, ndlp,
21974                                          lpfc_cmd->cur_iocbq.sli4_lxritag))
21975                         continue;
21976
21977                 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21978                         continue;
21979
21980                 list_del_init(&lpfc_cmd->list);
21981                 qp->get_io_bufs--;
21982                 lpfc_cmd->hdwq = qp;
21983                 lpfc_cmd->hdwq_no = idx;
21984                 return lpfc_cmd;
21985         }
21986         return NULL;
21987 }
21988
21989 /**
21990  * lpfc_get_io_buf - Get one IO buffer from free pool
21991  * @phba: The HBA for which this call is being executed.
21992  * @ndlp: pointer to lpfc nodelist data structure.
21993  * @hwqid: index of the HWQ to operate on.
21994  * @expedite: 1 means this request is urgent.
21995  *
21996  * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
21997  * removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
21998  * removes an IO buffer from the head of @hdwq io_buf_list and returns it to the caller.
21999  *
22000  * Note: ndlp is only used on SCSI side for RRQ testing.
22001  *       The caller should pass NULL for ndlp on NVME side.
22002  *
22003  * Return codes:
22004  *   NULL - Error
22005  *   Pointer to lpfc_io_buf - Success
22006  **/
22007 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22008                                     struct lpfc_nodelist *ndlp,
22009                                     u32 hwqid, int expedite)
22010 {
22011         struct lpfc_sli4_hdw_queue *qp;
22012         unsigned long iflag;
22013         struct lpfc_io_buf *lpfc_cmd;
22014
22015         qp = &phba->sli4_hba.hdwq[hwqid];
22016         lpfc_cmd = NULL;
22017         if (!qp) {
22018                 lpfc_printf_log(phba, KERN_WARNING,
22019                                 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22020                                 "5555 NULL qp for hwqid  x%x\n", hwqid);
22021                 return lpfc_cmd;
22022         }
22023
22024         if (phba->cfg_xri_rebalancing)
22025                 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22026                         phba, ndlp, hwqid, expedite);
22027         else {
22028                 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22029                                           qp, alloc_xri_get);
22030                 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22031                         lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22032                 if (!lpfc_cmd) {
22033                         lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22034                                           qp, alloc_xri_put);
22035                         list_splice(&qp->lpfc_io_buf_list_put,
22036                                     &qp->lpfc_io_buf_list_get);
22037                         qp->get_io_bufs += qp->put_io_bufs;
22038                         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22039                         qp->put_io_bufs = 0;
22040                         spin_unlock(&qp->io_buf_list_put_lock);
22041                         if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22042                             expedite)
22043                                 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22044                 }
22045                 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22046         }
22047
22048         return lpfc_cmd;
22049 }
22050
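/* Editor's note: a hypothetical sketch of the expected get/release pairing
 * around lpfc_get_io_buf(). example_build_and_send() is an assumed stand-in
 * for the protocol-specific setup and WQE issue step.
 */
static int example_build_and_send(struct lpfc_hba *phba,
                                  struct lpfc_io_buf *buf); /* assumed */

static int example_start_io(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                            u32 hwqid)
{
        struct lpfc_io_buf *buf;
        int rc;

        /* NULL ndlp is expected on the NVME side per the header above */
        buf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
        if (!buf)
                return -EBUSY;  /* pools exhausted for a non-expedite IO */

        rc = example_build_and_send(phba, buf);
        if (rc)
                /* on failure, hand the buffer straight back to the pools */
                lpfc_release_io_buf(phba, buf, buf->hdwq);
        return rc;
}
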
22051 /**
22052  * lpfc_read_object - Retrieve object data from HBA
22053  * @phba: The HBA for which this call is being executed.
22054  * @rdobject: Pathname of object data we want to read.
22055  * @datap: Pointer to where data will be copied to.
22056  * @datasz: size of data area
22057  *
22058  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22059  * The data will be truncated if datasz is not large enough.
22060  * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22061  * Returns the actual bytes read from the object.
22062  */
22063 int
22064 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22065                  uint32_t datasz)
22066 {
22067         struct lpfc_mbx_read_object *read_object;
22068         LPFC_MBOXQ_t *mbox;
22069         int rc, length, eof, j, byte_cnt = 0;
22070         uint32_t shdr_status, shdr_add_status;
22071         union lpfc_sli4_cfg_shdr *shdr;
22072         struct lpfc_dmabuf *pcmd;
22073         u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22074
22075         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22076         if (!mbox)
22077                 return -ENOMEM;
22078         length = (sizeof(struct lpfc_mbx_read_object) -
22079                   sizeof(struct lpfc_sli4_cfg_mhdr));
22080         lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22081                          LPFC_MBOX_OPCODE_READ_OBJECT,
22082                          length, LPFC_SLI4_MBX_EMBED);
22083         read_object = &mbox->u.mqe.un.read_object;
22084         shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22085
22086         bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22087         bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22088         read_object->u.request.rd_object_offset = 0;
22089         read_object->u.request.rd_object_cnt = 1;
22090
22091         memset((void *)read_object->u.request.rd_object_name, 0,
22092                LPFC_OBJ_NAME_SZ);
22093         scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22094         for (j = 0; j < strlen(rdobject); j++)
22095                 read_object->u.request.rd_object_name[j] =
22096                         cpu_to_le32(rd_object_name[j]);
22097
22098         pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22099         if (pcmd)
22100                 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22101         if (!pcmd || !pcmd->virt) {
22102                 kfree(pcmd);
22103                 mempool_free(mbox, phba->mbox_mem_pool);
22104                 return -ENOMEM;
22105         }
22106         memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22107         read_object->u.request.rd_object_hbuf[0].pa_lo =
22108                 putPaddrLow(pcmd->phys);
22109         read_object->u.request.rd_object_hbuf[0].pa_hi =
22110                 putPaddrHigh(pcmd->phys);
22111         read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22112
22113         mbox->vport = phba->pport;
22114         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22115         mbox->ctx_ndlp = NULL;
22116
22117         rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22118         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22119         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22120
22121         if (shdr_status == STATUS_FAILED &&
22122             shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22123                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22124                                 "4674 No port cfg file in FW.\n");
22125                 byte_cnt = -ENOENT;
22126         } else if (shdr_status || shdr_add_status || rc) {
22127                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22128                                 "2625 READ_OBJECT mailbox failed with "
22129                                 "status x%x add_status x%x, mbx status x%x\n",
22130                                 shdr_status, shdr_add_status, rc);
22131                 byte_cnt = -ENXIO;
22132         } else {
22133                 /* Success */
22134                 length = read_object->u.response.rd_object_actual_rlen;
22135                 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22136                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22137                                 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22138                                 length, datasz, eof);
22139
22140                 /* Detect the port config file exists but is empty */
22141                 if (!length && eof) {
22142                         byte_cnt = 0;
22143                         goto exit;
22144                 }
22145
22146                 byte_cnt = length;
22147                 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22148         }
22149
22150  exit:
22151         /* This is an embedded SLI4 mailbox with an external buffer allocated.
22152          * Free the pcmd and then cleanup with the correct routine.
22153          */
22154         lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22155         kfree(pcmd);
22156         lpfc_sli4_mbox_cmd_free(phba, mbox);
22157         return byte_cnt;
22158 }
22159
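/* Editor's note: a hypothetical sketch of reading a small firmware object;
 * "/driver/example.cfg" is a made-up object name used for illustration
 * only. Real callers read driver-defined objects such as the port config
 * file mentioned in the log messages above.
 */
static int example_read_object(struct lpfc_hba *phba)
{
        u32 *data;
        int len;

        data = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        len = lpfc_read_object(phba, "/driver/example.cfg", data,
                               LPFC_BPL_SIZE);
        if (len >= 0) {
                /* ... consume "len" bytes of object data ... */
        }
        kfree(data);
        return len < 0 ? len : 0;
}
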
22160 /**
22161  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22162  * @phba: The HBA for which this call is being executed.
22163  * @lpfc_buf: IO buf structure to append the SGL chunk
22164  *
22165  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22166  * and will allocate an SGL chunk if the pool is empty.
22167  *
22168  * Return codes:
22169  *   NULL - Error
22170  *   Pointer to sli4_hybrid_sgl - Success
22171  **/
22172 struct sli4_hybrid_sgl *
22173 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22174 {
22175         struct sli4_hybrid_sgl *list_entry = NULL;
22176         struct sli4_hybrid_sgl *tmp = NULL;
22177         struct sli4_hybrid_sgl *allocated_sgl = NULL;
22178         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22179         struct list_head *buf_list = &hdwq->sgl_list;
22180         unsigned long iflags;
22181
22182         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22183
22184         if (likely(!list_empty(buf_list))) {
22185                 /* break off 1 chunk from the sgl_list */
22186                 list_for_each_entry_safe(list_entry, tmp,
22187                                          buf_list, list_node) {
22188                         list_move_tail(&list_entry->list_node,
22189                                        &lpfc_buf->dma_sgl_xtra_list);
22190                         break;
22191                 }
22192         } else {
22193                 /* allocate more */
22194                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22195                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22196                                    cpu_to_node(hdwq->io_wq->chann));
22197                 if (!tmp) {
22198                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22199                                         "8353 error kmalloc memory for HDWQ "
22200                                         "%d %s\n",
22201                                         lpfc_buf->hdwq_no, __func__);
22202                         return NULL;
22203                 }
22204
22205                 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22206                                               GFP_ATOMIC, &tmp->dma_phys_sgl);
22207                 if (!tmp->dma_sgl) {
22208                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22209                                         "8354 error pool_alloc memory for HDWQ "
22210                                         "%d %s\n",
22211                                         lpfc_buf->hdwq_no, __func__);
22212                         kfree(tmp);
22213                         return NULL;
22214                 }
22215
22216                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22217                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22218         }
22219
22220         allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22221                                         struct sli4_hybrid_sgl,
22222                                         list_node);
22223
22224         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22225
22226         return allocated_sgl;
22227 }
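
/*
 * Editorial sketch (not driver code): lpfc_get_sgl_per_hdwq() above follows
 * a common "per-queue free-list cache with allocate-on-empty fallback"
 * shape, with the extra twist that the chunk is parked on the I/O buffer's
 * dma_sgl_xtra_list so ownership travels with the I/O. A minimal, generic
 * version of the same pattern, using hypothetical demo_chunk/demo_pool
 * types, might look like the following. Note how the lock is dropped before
 * the fallback allocation, just as the routine above drops hdwq_lock around
 * kmalloc_node()/dma_pool_alloc().
 */
#if 0	/* illustrative only, never compiled */
struct demo_chunk {
	struct list_head list_node;
};

struct demo_pool {
	spinlock_t lock;
	struct list_head free_list;
};

static struct demo_chunk *demo_pool_get(struct demo_pool *pool)
{
	struct demo_chunk *chunk;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	chunk = list_first_entry_or_null(&pool->free_list,
					 struct demo_chunk, list_node);
	if (chunk) {
		/* Cache hit: hand out a previously freed chunk. */
		list_del(&chunk->list_node);
		spin_unlock_irqrestore(&pool->lock, flags);
		return chunk;
	}

	/* Cache miss: drop the lock before allocating so it is not held
	 * across the allocator; GFP_ATOMIC keeps the allocation legal in
	 * interrupt context.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
	return kmalloc(sizeof(*chunk), GFP_ATOMIC);
}
#endif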
22228
22229 /**
22230  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22231  * @phba: The HBA for which this call is being executed.
22232  * @lpfc_buf: IO buf structure with the SGL chunk
22233  *
22234  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22235  *
22236  * Return codes:
22237  *   0 - Success
22238  *   -EINVAL - Error
22239  **/
22240 int
22241 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22242 {
22243         int rc = 0;
22244         struct sli4_hybrid_sgl *list_entry = NULL;
22245         struct sli4_hybrid_sgl *tmp = NULL;
22246         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22247         struct list_head *buf_list = &hdwq->sgl_list;
22248         unsigned long iflags;
22249
22250         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22251
22252         if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22253                 list_for_each_entry_safe(list_entry, tmp,
22254                                          &lpfc_buf->dma_sgl_xtra_list,
22255                                          list_node) {
22256                         list_move_tail(&list_entry->list_node,
22257                                        buf_list);
22258                 }
22259         } else {
22260                 rc = -EINVAL;
22261         }
22262
22263         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22264         return rc;
22265 }
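
/*
 * Editorial sketch (not driver code): the put side simply splices the
 * chunk(s) back onto the per-hdwq free list; the -EINVAL above flags a
 * caller that tried to return a buffer holding no chunk. Continuing the
 * hypothetical demo_pool example from the previous sketch:
 */
#if 0	/* illustrative only, never compiled */
static void demo_pool_put(struct demo_pool *pool, struct demo_chunk *chunk)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	/* Return the chunk for reuse. Nothing is freed here, so the cache
	 * grows to the workload's high-water mark and stays there until an
	 * explicit teardown pass drains it.
	 */
	list_add_tail(&chunk->list_node, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
#endif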
22266
22267 /**
22268  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22269  * @phba: phba object
22270  * @hdwq: hdwq to clean up SGL buffer resources on
22271  *
22272  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22273  *
22274  * Return codes:
22275  *   None
22276  **/
22277 void
22278 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22279                        struct lpfc_sli4_hdw_queue *hdwq)
22280 {
22281         struct list_head *buf_list = &hdwq->sgl_list;
22282         struct sli4_hybrid_sgl *list_entry = NULL;
22283         struct sli4_hybrid_sgl *tmp = NULL;
22284         unsigned long iflags;
22285
22286         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22287
22288         /* Free sgl pool */
22289         list_for_each_entry_safe(list_entry, tmp,
22290                                  buf_list, list_node) {
22291                 list_del(&list_entry->list_node);
22292                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22293                               list_entry->dma_sgl,
22294                               list_entry->dma_phys_sgl);
22295                 kfree(list_entry);
22296         }
22297
22298         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22299 }
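
/*
 * Editorial sketch (not driver code): dma_pool_destroy() expects every
 * block to have been returned, so a cache-draining routine like
 * lpfc_free_sgl_per_hdwq() must run before the backing pool goes away. The
 * required ordering is roughly the following; the driver performs the
 * actual pool destruction elsewhere, during its memory teardown path.
 */
#if 0	/* illustrative only, never compiled */
static void demo_hdwq_teardown(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	/* 1) Give every cached SGL chunk back to lpfc_sg_dma_buf_pool. */
	lpfc_free_sgl_per_hdwq(phba, hdwq);
	/* 2) Only then may the dma_pool itself be destroyed. */
	dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
}
#endif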
22300
22301 /**
22302  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22303  * @phba: The HBA for which this call is being executed.
22304  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22305  *
22306  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22307  * and will allocate a CMD/RSP buffer if the pool is empty.
22308  *
22309  * Return codes:
22310  *   NULL - Error
22311  *   Pointer to fcp_cmd_rsp_buf - Success
22312  **/
22313 struct fcp_cmd_rsp_buf *
22314 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22315                               struct lpfc_io_buf *lpfc_buf)
22316 {
22317         struct fcp_cmd_rsp_buf *list_entry = NULL;
22318         struct fcp_cmd_rsp_buf *tmp = NULL;
22319         struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22320         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22321         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22322         unsigned long iflags;
22323
22324         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22325
22326         if (likely(!list_empty(buf_list))) {
22327                 /* break off 1 chunk from the list */
22328                 list_for_each_entry_safe(list_entry, tmp,
22329                                          buf_list,
22330                                          list_node) {
22331                         list_move_tail(&list_entry->list_node,
22332                                        &lpfc_buf->dma_cmd_rsp_list);
22333                         break;
22334                 }
22335         } else {
22336                 /* allocate more */
22337                 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22338                 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22339                                    cpu_to_node(hdwq->io_wq->chann));
22340                 if (!tmp) {
22341                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22342                                         "8355 error kmalloc memory for HDWQ "
22343                                         "%d %s\n",
22344                                         lpfc_buf->hdwq_no, __func__);
22345                         return NULL;
22346                 }
22347
22348                 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22349                                                 GFP_ATOMIC,
22350                                                 &tmp->fcp_cmd_rsp_dma_handle);
22351
22352                 if (!tmp->fcp_cmnd) {
22353                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22354                                         "8356 error pool_alloc memory for HDWQ "
22355                                         "%d %s\n",
22356                                         lpfc_buf->hdwq_no, __func__);
22357                         kfree(tmp);
22358                         return NULL;
22359                 }
22360
22361                 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22362                                 sizeof(struct fcp_cmnd));
22363
22364                 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22365                 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22366         }
22367
22368         allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22369                                         struct fcp_cmd_rsp_buf,
22370                                         list_node);
22371
22372         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22373
22374         return allocated_buf;
22375 }
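
/*
 * Editorial note with a small sketch (not driver code): the single
 * dma_pool_zalloc() above carves one DMA-coherent block into two adjacent
 * structures, and fcp_rsp is derived purely by pointer arithmetic:
 *
 *   fcp_cmd_rsp_dma_handle --> +-----------------+
 *                              | struct fcp_cmnd |
 *                              +-----------------+ + sizeof(struct fcp_cmnd)
 *                              | struct fcp_rsp  |
 *                              +-----------------+
 *
 * No second mapping is needed for the response; its device-visible address
 * can be computed with the same offset:
 */
#if 0	/* illustrative only, never compiled */
	dma_addr_t rsp_dma = tmp->fcp_cmd_rsp_dma_handle +
			     sizeof(struct fcp_cmnd);
#endif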
22376
22377 /**
22378  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22379  * @phba: The HBA for which this call is being executed.
22380  * @lpfc_buf: IO buf structure with the CMD/RSP buf
22381  *
22382  * This routine puts one CMD/RSP buffer into the hdwq's CMD/RSP pool.
22383  *
22384  * Return codes:
22385  *   0 - Success
22386  *   -EINVAL - Error
22387  **/
22388 int
22389 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22390                               struct lpfc_io_buf *lpfc_buf)
22391 {
22392         int rc = 0;
22393         struct fcp_cmd_rsp_buf *list_entry = NULL;
22394         struct fcp_cmd_rsp_buf *tmp = NULL;
22395         struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22396         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22397         unsigned long iflags;
22398
22399         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22400
22401         if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22402                 list_for_each_entry_safe(list_entry, tmp,
22403                                          &lpfc_buf->dma_cmd_rsp_list,
22404                                          list_node) {
22405                         list_move_tail(&list_entry->list_node,
22406                                        buf_list);
22407                 }
22408         } else {
22409                 rc = -EINVAL;
22410         }
22411
22412         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22413         return rc;
22414 }
22415
22416 /**
22417  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22418  * @phba: phba object
22419  * @hdwq: hdwq to clean up CMD/RSP buffer resources on
22420  *
22421  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22422  *
22423  * Return codes:
22424  *   None
22425  **/
22426 void
22427 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22428                                struct lpfc_sli4_hdw_queue *hdwq)
22429 {
22430         struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22431         struct fcp_cmd_rsp_buf *list_entry = NULL;
22432         struct fcp_cmd_rsp_buf *tmp = NULL;
22433         unsigned long iflags;
22434
22435         spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22436
22437         /* Free cmd_rsp buf pool */
22438         list_for_each_entry_safe(list_entry, tmp,
22439                                  buf_list,
22440                                  list_node) {
22441                 list_del(&list_entry->list_node);
22442                 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22443                               list_entry->fcp_cmnd,
22444                               list_entry->fcp_cmd_rsp_dma_handle);
22445                 kfree(list_entry);
22446         }
22447
22448         spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22449 }
22450
22451 /**
22452  * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22453  * @phba: phba object
22454  * @job: job entry of the command to be posted.
22455  *
22456  * Fill in the common fields of the WQE for the command to be posted.
22457  *
22458  * Return codes:
22459  *      None
22460  **/
22461 void
22462 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22463 {
22464         u8 cmnd;
22465         u32 *pcmd;
22466         u32 if_type = 0;
22467         u32 fip, abort_tag;
22468         struct lpfc_nodelist *ndlp = NULL;
22469         union lpfc_wqe128 *wqe = &job->wqe;
22470         u8 command_type = ELS_COMMAND_NON_FIP;
22471
22472         fip = phba->hba_flag & HBA_FIP_SUPPORT;
22473         /* The fcp commands will set command type */
22474         if (job->cmd_flag & LPFC_IO_FCP)
22475                 command_type = FCP_COMMAND;
22476         else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22477                 command_type = ELS_COMMAND_FIP;
22478         else
22479                 command_type = ELS_COMMAND_NON_FIP;
22480
22481         abort_tag = job->iotag;
22482         cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22483
22484         switch (cmnd) {
22485         case CMD_ELS_REQUEST64_WQE:
22486                 ndlp = job->ndlp;
22487
22488                 if_type = bf_get(lpfc_sli_intf_if_type,
22489                                  &phba->sli4_hba.sli_intf);
22490                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22491                         pcmd = (u32 *)job->cmd_dmabuf->virt;
22492                         if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22493                                      *pcmd == ELS_CMD_SCR ||
22494                                      *pcmd == ELS_CMD_RDF ||
22495                                      *pcmd == ELS_CMD_EDC ||
22496                                      *pcmd == ELS_CMD_RSCN_XMT ||
22497                                      *pcmd == ELS_CMD_FDISC ||
22498                                      *pcmd == ELS_CMD_LOGO ||
22499                                      *pcmd == ELS_CMD_QFPA ||
22500                                      *pcmd == ELS_CMD_UVEM ||
22501                                      *pcmd == ELS_CMD_PLOGI)) {
22502                                 bf_set(els_req64_sp, &wqe->els_req, 1);
22503                                 bf_set(els_req64_sid, &wqe->els_req,
22504                                        job->vport->fc_myDID);
22505
22506                                 if ((*pcmd == ELS_CMD_FLOGI) &&
22507                                     !(phba->fc_topology ==
22508                                       LPFC_TOPOLOGY_LOOP))
22509                                         bf_set(els_req64_sid, &wqe->els_req, 0);
22510
22511                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22512                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22513                                        phba->vpi_ids[job->vport->vpi]);
22514                         } else if (pcmd) {
22515                                 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22516                                 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22517                                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22518                         }
22519                 }
22520
22521                 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22522                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22523
22524                 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22525                 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22526                 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22527                 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22528                 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22529                 break;
22530         case CMD_XMIT_ELS_RSP64_WQE:
22531                 ndlp = job->ndlp;
22532
22533                 /* word4 */
22534                 wqe->xmit_els_rsp.word4 = 0;
22535
22536                 if_type = bf_get(lpfc_sli_intf_if_type,
22537                                  &phba->sli4_hba.sli_intf);
22538                 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22539                         if (job->vport->fc_flag & FC_PT2PT) {
22540                                 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22541                                 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22542                                        job->vport->fc_myDID);
22543                                 if (job->vport->fc_myDID == Fabric_DID) {
22544                                         bf_set(wqe_els_did,
22545                                                &wqe->xmit_els_rsp.wqe_dest, 0);
22546                                 }
22547                         }
22548                 }
22549
22550                 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22551                 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22552                 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22553                 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22554                        LPFC_WQE_LENLOC_WORD3);
22555                 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22556
22557                 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22558                         bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22559                         bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22560                                job->vport->fc_myDID);
22561                         bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22562                 }
22563
22564                 if (phba->sli_rev == LPFC_SLI_REV4) {
22565                         bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22566                                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22567
22568                         if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22569                                 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22570                                        phba->vpi_ids[job->vport->vpi]);
22571                 }
22572                 command_type = OTHER_COMMAND;
22573                 break;
22574         case CMD_GEN_REQUEST64_WQE:
22575                 /* Word 10 */
22576                 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22577                 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22578                 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22579                 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22580                 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22581                 command_type = OTHER_COMMAND;
22582                 break;
22583         case CMD_XMIT_SEQUENCE64_WQE:
22584                 if (phba->link_flag & LS_LOOPBACK_MODE)
22585                         bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22586
22587                 wqe->xmit_sequence.rsvd3 = 0;
22588                 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22589                 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22590                 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22591                        LPFC_WQE_IOD_WRITE);
22592                 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22593                        LPFC_WQE_LENLOC_WORD12);
22594                 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22595                 command_type = OTHER_COMMAND;
22596                 break;
22597         case CMD_XMIT_BLS_RSP64_WQE:
22598                 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22599                 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22600                 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22601                 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22602                        phba->vpi_ids[phba->pport->vpi]);
22603                 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22604                 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22605                        LPFC_WQE_LENLOC_NONE);
22606         /* Overwrite the pre-set command type with OTHER_COMMAND */
22607                 command_type = OTHER_COMMAND;
22608                 break;
22609         case CMD_FCP_ICMND64_WQE:       /* task mgmt commands */
22610         case CMD_ABORT_XRI_WQE:         /* abort iotag */
22611         case CMD_SEND_FRAME:            /* mds loopback */
22612                 /* cases already formatted for sli4 wqe - no changes necessary */
22613                 return;
22614         default:
22615                 dump_stack();
22616                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22617                                 "6207 Invalid command 0x%x\n",
22618                                 cmnd);
22619                 break;
22620         }
22621
22622         wqe->generic.wqe_com.abort_tag = abort_tag;
22623         bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22624         bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22625         bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22626 }
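
/*
 * Editorial sketch (not driver code): every field access in the routine
 * above goes through lpfc's bf_set()/bf_get() macros. For each bitfield
 * "name", lpfc_hw4.h defines name_SHIFT, name_MASK and name_WORD constants,
 * and the accessors are mask-and-shift one-liners along these lines (the
 * demo_* names are hypothetical; see lpfc_hw4.h for the real macros):
 */
#if 0	/* illustrative only, never compiled */
#define demo_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

#define demo_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#endif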