/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: [email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>        /* To get host page size per arch */


#include "mpt3sas_base.h"

static MPT_CALLBACK     mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH     30000
#define MAX_CHAIN_DEPTH         100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, "max controller queue depth");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, "max sg entries");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable, default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
        "max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
        "irq poll weight (default = one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
        "enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
        "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
        "0 - balanced: high iops mode is enabled &\n\t\t"
        "interrupt coalescing is enabled only on high iops queues,\n\t\t"
        "1 - iops: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues,\n\t\t"
        "2 - latency: high iops mode is disabled &\n\t\t"
        "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
        "\t\tdefault - default perf_mode is 'balanced'"
        );

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
        "This parameter is effective only if host_tagset_enable=1.\n\t\t"
        "When poll_queues is enabled, perf_mode is set to latency mode.\n\t\t"
        );

enum mpt3sas_perf_mode {
        MPT_PERF_MODE_DEFAULT   = -1,
        MPT_PERF_MODE_BALANCED  = 0,
        MPT_PERF_MODE_IOPS      = 1,
        MPT_PERF_MODE_LATENCY   = 2,
};

static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
                u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether a command timed out or
 *              was terminated due to a host reset.
 *
 * @ioc:        per adapter object.
 * @status:     status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz:         size of buffer.
 *
 * Return: 1 if a reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
                u8 status, void *mpi_request, int sz)
{
        u8 issue_reset = 0;

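        /*
         * MPT3_CMD_RESET in @status means the command was cleaned up by a
         * host reset; in every other case the wait genuinely timed out
         * and the caller should escalate to a reset.
         */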
        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;

        ioc_err(ioc, "Command %s\n",
                issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);

        return issue_reset;
}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: parameter value string
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, negative error code on failure.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        /* global ioc spinlock to protect controller list on list operations */
        pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->fwfault_debug = mpt3sas_fwfault_debug;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
        param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry readl() up to three times if it returns a zero value
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
        u32 i = 0, ret_val;

        do {
                ret_val = readl(addr);
                i++;
        } while (ret_val == 0 && i < 3);

        return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
        return readl(addr);
}

/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *                                in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32-bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
                u32 index)
{
        /*
         * The first 256 bytes of BAR0 are system registers; MPI frames
         * start at offset 256.  With a maximum of 32 supported MPI frames
         * of 128 bytes each (32 * 128 = 4K), the clone of the reply free
         * pool for the mCPU starts right after that.
         */
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
                        MPI_FRAME_START_OFFSET +
                        (cmd_credit * ioc->request_sz) + (index * sizeof(u32));

        writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *                              to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)src;

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                                (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
        int i;
        u32 *src_virt_mem = (u32 *)(src);

        for (i = 0; i < size/4; i++)
                writel((u32)src_virt_mem[i],
                        (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and returns the virtual chain address
 *                       for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        void __iomem *base_chain, *chain_virt;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and returns the physical address
 *                      in BAR0 for scatter gather chains, for
 *                      the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                u8 sge_chain_count)
{
        phys_addr_t base_chain_phys, chain_phys;
        u16 cmd_credit = ioc->facts.RequestCredit + 1;

        base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
                (cmd_credit * ioc->request_sz) +
                REPLY_FREE_POOL_SIZE;
        chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
                        ioc->request_sz) + (sge_chain_count * ioc->request_sz);
        return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and returns the BAR0-mapped host
 *                      buffer address for the provided smid.
 *                      (Each smid gets a 64K buffer; the region starts
 *                      at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        /* Add an extra 1 to reach the end of the chain region. */
        void __iomem *chain_end = _base_get_chain(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and returns the BAR0-mapped
 *              host buffer physical address for the provided smid.
 *              (Each smid gets a 64K buffer; the region starts
 *              at offset 17024.)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
        u16 cmd_credit = ioc->facts.RequestCredit + 1;
        phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
                        cmd_credit + 1,
                        ioc->facts.MaxChainDepth);
        return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates over the chain
 *                      lookup list and returns the chain_buffer
 *                      address for the matching dma address.
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to the chain buffer, or NULL on failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
                dma_addr_t chain_buffer_dma)
{
        u16 index, j;
        struct chain_tracker *ct;

        for (index = 0; index < ioc->scsiio_depth; index++) {
                for (j = 0; j < ioc->chains_needed_per_io; j++) {
                        ct = &ioc->chain_lookup[index].chains_per_smid[j];
                        if (ct && ct->chain_buffer_dma == chain_buffer_dma)
                                return ct->chain_buffer;
                }
        }
        ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
}

/**
 * _clone_sg_entries - handles MPI EP's SCSI IO and config requests.
 *                      Base function for double buffering, called
 *                      before submitting the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
                void *mpi_request, u16 smid)
{
        Mpi2SGESimple32_t *sgel, *sgel_next;
        u32  sgl_flags, sge_chain_count = 0;
        bool is_write = false;
        u16 i = 0;
        void __iomem *buffer_iomem;
        phys_addr_t buffer_iomem_phys;
        void __iomem *buff_ptr;
        phys_addr_t buff_ptr_phys;
        void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
        phys_addr_t dst_addr_phys;
        MPI2RequestHeader_t *request_hdr;
        struct scsi_cmnd *scmd;
        struct scatterlist *sg_scmd = NULL;
        int is_scsiio_req = 0;

        request_hdr = (MPI2RequestHeader_t *) mpi_request;

        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
                Mpi25SCSIIORequest_t *scsiio_request =
                        (Mpi25SCSIIORequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
                is_scsiio_req = 1;
        } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t  *config_req =
                        (Mpi2ConfigRequest_t *)mpi_request;
                sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
        } else
                return;

        /* From the smid we can get the scsi_cmnd; once we have sg_scmd,
         * we just need sg_virt and sg_next to get the virtual
         * address associated with sgel->Address.
         */

        if (is_scsiio_req) {
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
                        ioc_err(ioc, "scmd is NULL\n");
                        return;
                }

                /* Get sg_scmd from scmd provided */
                sg_scmd = scsi_sglist(scmd);
        }

        /*
         * 0 - 255      System register
         * 256 - 4352   MPI Frame. (This is based on maxCredit 32)
         * 4352 - 4864  Reply_free pool (512 bytes are reserved
         *              considering maxCredit 32. Replies need extra
         *              room; for the mCPU case four times maxCredit
         *              is kept).
         * 4864 - 17152 SGE chain element. (32 cmds * 3 chains of
         *              128 byte size = 12288)
         * 17152 - x    Host buffer mapped with smid.
         *              (Each smid can have 64K Max IO.)
         * BAR0+Last 1K MSIX Addr and Data
         * Total size in use 2113664 bytes of 4MB BAR0
         */

        buffer_iomem = _base_get_buffer_bar0(ioc, smid);
        buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

        buff_ptr = buffer_iomem;
        buff_ptr_phys = buffer_iomem_phys;
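        /*
         * The 32-bit simple SGEs used here can only carry a 32-bit
         * address, so the BAR0 host buffer physical address must fit
         * below 4GB.
         */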
        WARN_ON(buff_ptr_phys > U32_MAX);

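        /*
         * HOST_TO_IOC in the first SGE's flags means the data moves from
         * host memory to the controller, i.e. this is a write.
         */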
        if (le32_to_cpu(sgel->FlagsLength) &
                        (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
                is_write = true;

        for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

                sgl_flags =
                    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

                switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
                case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
                        /*
                         * Helper function which on passing
                         * chain_buffer_dma returns chain_buffer. Get
                         * the virtual address for sgel->Address
                         */
                        sgel_next =
                                _base_get_chain_buffer_dma_to_chain_buffer(ioc,
                                                le32_to_cpu(sgel->Address));
                        if (sgel_next == NULL)
                                return;
                        /*
                         * This is copying a 128-byte chain
                         * frame (not a host buffer)
                         */
                        dst_chain_addr[sge_chain_count] =
                                _base_get_chain(ioc,
                                        smid, sge_chain_count);
                        src_chain_addr[sge_chain_count] =
                                                (void *) sgel_next;
                        dst_addr_phys = _base_get_chain_phys(ioc,
                                                smid, sge_chain_count);
                        WARN_ON(dst_addr_phys > U32_MAX);
                        sgel->Address =
                                cpu_to_le32(lower_32_bits(dst_addr_phys));
                        sgel = sgel_next;
                        sge_chain_count++;
                        break;
                case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
                        if (is_write) {
                                if (is_scsiio_req) {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            sg_virt(sg_scmd),
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        /*
                                         * FIXME: this relies on a zero
                                         * PCI mem_offset.
                                         */
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                } else {
                                        _base_clone_to_sys_mem(buff_ptr,
                                            ioc->config_vaddr,
                                            (le32_to_cpu(sgel->FlagsLength) &
                                            0x00ffffff));
                                        sgel->Address =
                                            cpu_to_le32((u32)buff_ptr_phys);
                                }
                        }
                        buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
                            0x00ffffff);
                        if ((le32_to_cpu(sgel->FlagsLength) &
                            (MPI2_SGE_FLAGS_END_OF_BUFFER
                                        << MPI2_SGE_FLAGS_SHIFT)))
                                goto eob_clone_chain;
                        else {
                                /*
                                 * Every single element in MPT will have an
                                 * associated sg_next. Better to sanity-check
                                 * that sg_next is not NULL; it would be a
                                 * bug if it were.
                                 */
                                if (is_scsiio_req) {
                                        sg_scmd = sg_next(sg_scmd);
                                        if (sg_scmd)
                                                sgel++;
                                        else
                                                goto eob_clone_chain;
                                }
                        }
                        break;
                }
        }

eob_clone_chain:
        for (i = 0; i < sge_chain_count; i++) {
                if (is_scsiio_req)
                        _base_clone_to_sys_mem(dst_chain_addr[i],
                                src_chain_addr[i], ioc->request_sz);
        }
}

/**
 *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 otherwise.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;

        if (!ioc)
                return -1;

        pdev = ioc->pdev;
        if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
}

/**
 * _base_sync_drv_fw_timestamp - Sync Driver-FW TimeStamp.
 * @ioc: Per Adapter Object
 *
 * Return: nothing.
 */
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
        Mpi26IoUnitControlRequest_t *mpi_request;
        Mpi26IoUnitControlReply_t *mpi_reply;
        u16 smid;
        ktime_t current_time;
        u64 TimeStamp = 0;
        u8 issue_reset = 0;

        mutex_lock(&ioc->scsih_cmds.mutex);
        if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
                ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
                goto out;
        }
        ioc->scsih_cmds.status = MPT3_CMD_PENDING;
        smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
        if (!smid) {
                ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
                ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
                goto out;
        }
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        ioc->scsih_cmds.smid = smid;
        memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
        mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
        mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
        mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
        current_time = ktime_get_real();
        TimeStamp = ktime_to_ms(current_time);
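        /*
         * The 64-bit millisecond timestamp is split across two 32-bit
         * request fields: the upper half goes in Reserved7 and the
         * lower half in IOCParameterValue.
         */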
        mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
        mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
        init_completion(&ioc->scsih_cmds.done);
        ioc->put_smid_default(ioc, smid);
        dinitprintk(ioc, ioc_info(ioc,
            "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
            TimeStamp));
        wait_for_completion_timeout(&ioc->scsih_cmds.done,
                MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
                mpt3sas_check_cmd_timeout(ioc,
                    ioc->scsih_cmds.status, mpi_request,
                    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
                goto issue_host_reset;
        }
        if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
                mpi_reply = ioc->scsih_cmds.reply;
                dinitprintk(ioc, ioc_info(ioc,
                    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
                    le16_to_cpu(mpi_reply->IOCStatus),
                    le32_to_cpu(mpi_reply->IOCLogInfo)));
        }
issue_host_reset:
        if (issue_reset)
                mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
        mutex_unlock(&ioc->scsih_cmds.mutex);
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
        struct MPT3SAS_ADAPTER *ioc =
            container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
        unsigned long    flags;
        u32 doorbell;
        int rc;
        struct task_struct *p;


        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
                        ioc->pci_error_recovery)
                goto rearm_timer;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
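        /*
         * All state bits set typically means the doorbell read back as
         * 0xFFFFFFFF, i.e. the chip is no longer reachable over PCI.
         */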
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
                ioc_err(ioc, "SAS host is non-operational !!!!\n");

                /* It may be possible that EEH recovery can resolve some of
                 * the pci bus failure issues rather than removing the dead
                 * ioc function by considering the controller to be in a
                 * non-operational state. So here priority is given to EEH
                 * recovery. If it does not resolve this issue, the mpt3sas
                 * driver will consider this controller to be in the
                 * non-operational state and remove the dead ioc function.
                 */
                if (ioc->non_operational_loop++ < 5) {
                        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
                                                         flags);
                        goto rearm_timer;
                }

                /*
                 * Call _scsih_flush_pending_cmds callback so that we flush all
                 * pending commands back to OS. This call is required to avoid
                 * deadlock at block layer. Dead IOC will fail to do diag reset,
                 * and this call is safe since dead ioc will never return any
                 * command back from HW.
                 */
                mpt3sas_base_pause_mq_polling(ioc);
                ioc->schedule_dead_ioc_flush_running_cmds(ioc);
                /*
                 * Set remove_host flag early since kernel thread will
                 * take some time to execute.
                 */
                ioc->remove_host = 1;
                /* Remove the dead host */
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
                                __func__);
                else
                        ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
                                __func__);
                return; /* don't rearm timer */
        }

        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
                u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                    ioc->manu_pg11.CoreDumpTOSec :
                    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

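                /* Convert the CoreDump timeout from seconds into the
                 * number of fault-polling iterations it spans.
                 */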
                timeout /= (FAULT_POLLING_INTERVAL/1000);

                if (ioc->ioc_coredump_loop == 0) {
                        mpt3sas_print_coredump_info(ioc,
                            doorbell & MPI2_DOORBELL_DATA_MASK);
                        /* do not accept any IOs and disable the interrupts */
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        ioc->shost_recovery = 1;
                        spin_unlock_irqrestore(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        mpt3sas_base_mask_interrupts(ioc);
                        mpt3sas_base_pause_mq_polling(ioc);
                        _base_clear_outstanding_commands(ioc);
                }

                ioc_info(ioc, "%s: CoreDump loop %d.",
                    __func__, ioc->ioc_coredump_loop);

                /* Wait until CoreDump completes or times out */
                if (ioc->ioc_coredump_loop++ < timeout) {
                        spin_lock_irqsave(
                            &ioc->ioc_reset_in_progress_lock, flags);
                        goto rearm_timer;
                }
        }

        if (ioc->ioc_coredump_loop) {
                if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
                        ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                else
                        ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
                            __func__, ioc->ioc_coredump_loop);
                ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
        }
        ioc->non_operational_loop = 0;
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                ioc_warn(ioc, "%s: hard reset: %s\n",
                         __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                        mpt3sas_print_fault_code(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
                    MPI2_IOC_STATE_COREDUMP)
                        mpt3sas_print_coredump_info(ioc, doorbell &
                            MPI2_DOORBELL_DATA_MASK);
                if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_OPERATIONAL)
                        return; /* don't rearm timer */
        }
        ioc->ioc_coredump_loop = 0;
        if (ioc->time_sync_interval &&
            ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
                ioc->timestamp_update_count = 0;
                _base_sync_drv_fw_timestamp(ioc);
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long    flags;

        if (ioc->fault_reset_work_q)
                return;

        ioc->timestamp_update_count = 0;
        /* initialize fault polling */

        INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
        snprintf(ioc->fault_reset_work_q_name,
            sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
            ioc->driver_name, ioc->id);
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
                ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        if (ioc->fault_reset_work_q)
                queue_delayed_work(ioc->fault_reset_work_q,
                    &ioc->fault_reset_work,
                    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
        unsigned long flags;
        struct workqueue_struct *wq;

        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        wq = ioc->fault_reset_work_q;
        ioc->fault_reset_work_q = NULL;
        spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
        if (wq) {
                if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
                        flush_workqueue(wq);
                destroy_workqueue(wq);
        }
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return: nothing.
 */
void
mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
        ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
 * completes or times out
 * @ioc: per adapter object
 * @caller: caller function name
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
                const char *caller)
{
        u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
                        ioc->manu_pg11.CoreDumpTOSec :
                        MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;

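        /*
         * A completed CoreDump leaves the IOC in the FAULT state, so
         * wait for the FAULT state rather than for COREDUMP to clear.
         */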
        int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
                                        timeout);

        if (ioc_state)
                ioc_err(ioc,
                    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
                    caller, ioc_state);
        else
                ioc_info(ioc,
                    "%s: CoreDump completed. (ioc_state=0x%x)\n",
                    caller, ioc_state);

        return ioc_state;
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The
 * intent is to stop both the driver and the firmware so the
 * end user can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
        u32 doorbell;

        if (!ioc->fwfault_debug)
                return;

        dump_stack();

        doorbell = ioc->base_readl(&ioc->chip->Doorbell);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
                mpt3sas_print_fault_code(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
            MPI2_IOC_STATE_COREDUMP) {
                mpt3sas_print_coredump_info(ioc, doorbell &
                    MPI2_DOORBELL_DATA_MASK);
        } else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
                ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }

        if (ioc->fwfault_debug == 2)
                for (;;)
                        ;
        else
                panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
        MPI2RequestHeader_t *request_hdr)
{
        u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        char *desc = NULL;
        u16 frame_sz;
        char *func_str = NULL;

        /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
        if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
            request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
                return;

        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
                return;
        /*
         * Older firmware versions don't support driver trigger pages,
         * so skip displaying 'config invalid type' error messages
         * for them.
         */
        if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
                Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;

                if ((rqst->ExtPageType ==
                    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
                    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
                        return;
                }
        }

        switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

        case MPI2_IOCSTATUS_INVALID_FUNCTION:
                desc = "invalid function";
                break;
        case MPI2_IOCSTATUS_BUSY:
                desc = "busy";
                break;
        case MPI2_IOCSTATUS_INVALID_SGL:
                desc = "invalid sgl";
                break;
        case MPI2_IOCSTATUS_INTERNAL_ERROR:
                desc = "internal error";
                break;
        case MPI2_IOCSTATUS_INVALID_VPID:
                desc = "invalid vpid";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
                desc = "insufficient resources";
                break;
        case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
                desc = "insufficient power";
                break;
        case MPI2_IOCSTATUS_INVALID_FIELD:
                desc = "invalid field";
                break;
        case MPI2_IOCSTATUS_INVALID_STATE:
                desc = "invalid state";
                break;
        case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
                desc = "op state not supported";
                break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

        case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
                desc = "config invalid action";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
                desc = "config invalid type";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
                desc = "config invalid page";
                break;
        case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
                desc = "config invalid data";
                break;
        case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
                desc = "config no defaults";
                break;
        case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
                desc = "config can't commit";
                break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

        case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
        case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
        case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
        case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
        case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

        case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
                desc = "eedp guard error";
                break;
        case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
                desc = "eedp ref tag error";
                break;
        case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
                desc = "eedp app tag error";
                break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

        case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
                desc = "target invalid io index";
                break;
        case MPI2_IOCSTATUS_TARGET_ABORTED:
                desc = "target aborted";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
                desc = "target no conn retryable";
                break;
        case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
                desc = "target no connection";
                break;
        case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
                desc = "target xfer count mismatch";
                break;
        case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
                desc = "target data offset error";
                break;
        case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
                desc = "target too much write data";
                break;
        case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
                desc = "target iu too short";
                break;
        case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
                desc = "target ack nak timeout";
                break;
        case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
                desc = "target nak received";
                break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

        case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
                desc = "smp request failed";
                break;
        case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
                desc = "smp data overrun";
                break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

        case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
                desc = "diagnostic released";
                break;
        default:
                break;
        }

        if (!desc)
                return;

        switch (request_hdr->Function) {
        case MPI2_FUNCTION_CONFIG:
                frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
                func_str = "config_page";
                break;
        case MPI2_FUNCTION_SCSI_TASK_MGMT:
                frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
                func_str = "task_mgmt";
                break;
        case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
                frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
                func_str = "sas_iounit_ctl";
                break;
        case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
                frame_sz = sizeof(Mpi2SepRequest_t);
                func_str = "enclosure";
                break;
        case MPI2_FUNCTION_IOC_INIT:
                frame_sz = sizeof(Mpi2IOCInitRequest_t);
                func_str = "ioc_init";
                break;
        case MPI2_FUNCTION_PORT_ENABLE:
                frame_sz = sizeof(Mpi2PortEnableRequest_t);
                func_str = "port_enable";
                break;
        case MPI2_FUNCTION_SMP_PASSTHROUGH:
                frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
                func_str = "smp_passthru";
                break;
        case MPI2_FUNCTION_NVME_ENCAPSULATED:
                frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
                    ioc->sge_size;
                func_str = "nvme_encapsulated";
                break;
        default:
                frame_sz = 32;
                func_str = "unknown";
                break;
        }

        ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
                 desc, ioc_status, request_hdr, func_str);

        _debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventNotificationReply_t *mpi_reply)
{
        char *desc = NULL;
        u16 event;

        if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
                return;

        event = le16_to_cpu(mpi_reply->Event);

        switch (event) {
        case MPI2_EVENT_LOG_DATA:
                desc = "Log Data";
                break;
        case MPI2_EVENT_STATE_CHANGE:
                desc = "Status Change";
                break;
        case MPI2_EVENT_HARD_RESET_RECEIVED:
                desc = "Hard Reset Received";
                break;
        case MPI2_EVENT_EVENT_CHANGE:
                desc = "Event Change";
                break;
        case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
                desc = "Device Status Change";
                break;
        case MPI2_EVENT_IR_OPERATION_STATUS:
                if (!ioc->hide_ir_msg)
                        desc = "IR Operation Status";
                break;
        case MPI2_EVENT_SAS_DISCOVERY:
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
                ioc_info(ioc, "Discovery: (%s)",
                         event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
                         "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
                desc = "SAS Broadcast Primitive";
                break;
        case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
                desc = "SAS Init Device Status Change";
                break;
        case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
                desc = "SAS Init Table Overflow";
                break;
        case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
                desc = "SAS Topology Change List";
                break;
        case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
                desc = "SAS Enclosure Device Status Change";
                break;
        case MPI2_EVENT_IR_VOLUME:
                if (!ioc->hide_ir_msg)
                        desc = "IR Volume";
                break;
        case MPI2_EVENT_IR_PHYSICAL_DISK:
                if (!ioc->hide_ir_msg)
                        desc = "IR Physical Disk";
                break;
        case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
                if (!ioc->hide_ir_msg)
                        desc = "IR Configuration Change List";
                break;
        case MPI2_EVENT_LOG_ENTRY_ADDED:
                if (!ioc->hide_ir_msg)
                        desc = "Log Entry Added";
                break;
        case MPI2_EVENT_TEMP_THRESHOLD:
                desc = "Temperature Threshold";
                break;
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                desc = "Cable Event";
                break;
        case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
                desc = "SAS Device Discovery Error";
                break;
        case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
                desc = "PCIE Device Status Change";
                break;
        case MPI2_EVENT_PCIE_ENUMERATION:
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
                ioc_info(ioc, "PCIE Enumeration: (%s)",
                         event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
                         "start" : "stop");
                if (event_data->EnumerationStatus)
                        pr_cont(" enumeration_status(0x%08x)",
                                le32_to_cpu(event_data->EnumerationStatus));
                pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
                desc = "PCIE Topology Change List";
                break;
        }

        if (!desc)
                return;

        ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
        union loginfo_type {
                u32     loginfo;
                struct {
                        u32     subcode:16;
                        u32     code:8;
                        u32     originator:4;
                        u32     bus_type:4;
                } dw;
        };
        union loginfo_type sas_loginfo;
        char *originator_str = NULL;

        sas_loginfo.loginfo = log_info;
        if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
                return;

        /* each nexus loss loginfo */
        if (log_info == 0x31170000)
                return;

        /* eat the loginfos associated with task aborts */
        if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
            0x31140000 || log_info == 0x31130000))
                return;

        switch (sas_loginfo.dw.originator) {
        case 0:
                originator_str = "IOP";
                break;
        case 1:
                originator_str = "PL";
                break;
        case 2:
                if (!ioc->hide_ir_msg)
                        originator_str = "IR";
                else
                        originator_str = "WarpDrive";
                break;
        }

        ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
                 log_info,
                 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
1369
1370 /**
1371  * _base_display_reply_info - handle reply descriptors depending on IOC Status
1372  * @ioc: per adapter object
1373  * @smid: system request message index
1374  * @msix_index: MSIX table index supplied by the OS
1375  * @reply: reply message frame (lower 32bit addr)
1376  */
1377 static void
1378 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1379         u32 reply)
1380 {
1381         MPI2DefaultReply_t *mpi_reply;
1382         u16 ioc_status;
1383         u32 loginfo = 0;
1384
1385         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1386         if (unlikely(!mpi_reply)) {
1387                 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1388                         __FILE__, __LINE__, __func__);
1389                 return;
1390         }
1391         ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1392
1393         if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1394             (ioc->logging_level & MPT_DEBUG_REPLY)) {
1395                 _base_sas_ioc_info(ioc, mpi_reply,
1396                    mpt3sas_base_get_msg_frame(ioc, smid));
1397         }
1398
1399         if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1400                 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1401                 _base_sas_log_info(ioc, loginfo);
1402         }
1403
1404         if (ioc_status || loginfo) {
1405                 ioc_status &= MPI2_IOCSTATUS_MASK;
1406                 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1407         }
1408 }
1409
1410 /**
1411  * mpt3sas_base_done - base internal command completion routine
1412  * @ioc: per adapter object
1413  * @smid: system request message index
1414  * @msix_index: MSIX table index supplied by the OS
1415  * @reply: reply message frame(lower 32bit addr)
1416  *
1417  * Return:
 * 1: the message frame (mf) should be freed by _base_interrupt.
 * 0: the mf is freed by this function.
1420  */
1421 u8
1422 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1423         u32 reply)
1424 {
1425         MPI2DefaultReply_t *mpi_reply;
1426
1427         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1428         if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1429                 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1430
1431         if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1432                 return 1;
1433
1434         ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1435         if (mpi_reply) {
1436                 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1437                 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1438         }
1439         ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1440
1441         complete(&ioc->base_cmds.done);
1442         return 1;
1443 }
1444
1445 /**
 * _base_async_event - main callback handler for firmware async events
1447  * @ioc: per adapter object
1448  * @msix_index: MSIX table index supplied by the OS
1449  * @reply: reply message frame(lower 32bit addr)
1450  *
1451  * Return:
 * 1: the message frame (mf) should be freed by _base_interrupt.
 * 0: the mf is freed by this function.
1454  */
1455 static u8
1456 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1457 {
1458         Mpi2EventNotificationReply_t *mpi_reply;
1459         Mpi2EventAckRequest_t *ack_request;
1460         u16 smid;
1461         struct _event_ack_list *delayed_event_ack;
1462
1463         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1464         if (!mpi_reply)
1465                 return 1;
1466         if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1467                 return 1;
1468
1469         _base_display_event_data(ioc, mpi_reply);
1470
1471         if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1472                 goto out;
1473         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1474         if (!smid) {
1475                 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1476                                         GFP_ATOMIC);
1477                 if (!delayed_event_ack)
1478                         goto out;
1479                 INIT_LIST_HEAD(&delayed_event_ack->list);
1480                 delayed_event_ack->Event = mpi_reply->Event;
1481                 delayed_event_ack->EventContext = mpi_reply->EventContext;
1482                 list_add_tail(&delayed_event_ack->list,
1483                                 &ioc->delayed_event_ack_list);
1484                 dewtprintk(ioc,
1485                            ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1486                                     le16_to_cpu(mpi_reply->Event)));
1487                 goto out;
1488         }
1489
1490         ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1491         memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1492         ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1493         ack_request->Event = mpi_reply->Event;
1494         ack_request->EventContext = mpi_reply->EventContext;
1495         ack_request->VF_ID = 0;  /* TODO */
1496         ack_request->VP_ID = 0;
1497         ioc->put_smid_default(ioc, smid);
1498
1499  out:
1500
1501         /* scsih callback handler */
1502         mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1503
1504         /* ctl callback handler */
1505         mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1506
1507         return 1;
1508 }
1509
1510 static struct scsiio_tracker *
1511 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1512 {
1513         struct scsi_cmnd *cmd;
1514
1515         if (WARN_ON(!smid) ||
1516             WARN_ON(smid >= ioc->hi_priority_smid))
1517                 return NULL;
1518
1519         cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1520         if (cmd)
1521                 return scsi_cmd_priv(cmd);
1522
1523         return NULL;
1524 }
1525
1526 /**
1527  * _base_get_cb_idx - obtain the callback index
1528  * @ioc: per adapter object
1529  * @smid: system request message index
1530  *
1531  * Return: callback index.
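 *
 * The smid ranges checked below partition the request pool as follows
 * (derived from the lookups in the function body):
 *
 *   [1 .. ctl_smid)                       per-command scsiio_tracker cb_idx
 *   ctl_smid                              ioc->ctl_cb_idx
 *   [hi_priority_smid .. internal_smid)   ioc->hpr_lookup[]
 *   [internal_smid .. hba_queue_depth]    ioc->internal_lookup[]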
1532  */
1533 static u8
1534 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1535 {
1536         int i;
1537         u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1538         u8 cb_idx = 0xFF;
1539
1540         if (smid < ioc->hi_priority_smid) {
1541                 struct scsiio_tracker *st;
1542
1543                 if (smid < ctl_smid) {
1544                         st = _get_st_from_smid(ioc, smid);
1545                         if (st)
1546                                 cb_idx = st->cb_idx;
1547                 } else if (smid == ctl_smid)
1548                         cb_idx = ioc->ctl_cb_idx;
1549         } else if (smid < ioc->internal_smid) {
1550                 i = smid - ioc->hi_priority_smid;
1551                 cb_idx = ioc->hpr_lookup[i].cb_idx;
1552         } else if (smid <= ioc->hba_queue_depth) {
1553                 i = smid - ioc->internal_smid;
1554                 cb_idx = ioc->internal_lookup[i].cb_idx;
1555         }
1556         return cb_idx;
1557 }
1558
1559 /**
 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
 *                              when the driver is flushing out the IOs.
 * @ioc: per adapter object
 *
 * Pause polling on the mq poll (io_uring) queues while the driver is
 * flushing out the IOs; otherwise the same IO could be completed from
 * two paths.
 *
 * Return: nothing.
1569  */
1570 void
1571 mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1572 {
1573         int iopoll_q_count =
1574             ioc->reply_queue_count - ioc->iopoll_q_start_index;
1575         int qid;
1576
1577         for (qid = 0; qid < iopoll_q_count; qid++)
1578                 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
1579
1580         /*
1581          * wait for current poll to complete.
1582          */
1583         for (qid = 0; qid < iopoll_q_count; qid++) {
1584                 while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
1585                         cpu_relax();
1586                         udelay(500);
1587                 }
1588         }
1589 }
1590
1591 /**
1592  * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
1593  * @ioc: per adapter object
1594  *
 * Return: nothing.
1596  */
1597 void
1598 mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1599 {
1600         int iopoll_q_count =
1601             ioc->reply_queue_count - ioc->iopoll_q_start_index;
1602         int qid;
1603
1604         for (qid = 0; qid < iopoll_q_count; qid++)
1605                 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
1606 }
1607
1608 /**
1609  * mpt3sas_base_mask_interrupts - disable interrupts
1610  * @ioc: per adapter object
1611  *
1612  * Disabling ResetIRQ, Reply and Doorbell Interrupts
1613  */
1614 void
1615 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1616 {
1617         u32 him_register;
1618
1619         ioc->mask_interrupts = 1;
1620         him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
        him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
1622         writel(him_register, &ioc->chip->HostInterruptMask);
1623         ioc->base_readl(&ioc->chip->HostInterruptMask);
1624 }
1625
1626 /**
1627  * mpt3sas_base_unmask_interrupts - enable interrupts
1628  * @ioc: per adapter object
1629  *
1630  * Enabling only Reply Interrupts
1631  */
1632 void
1633 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1634 {
1635         u32 him_register;
1636
1637         him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1638         him_register &= ~MPI2_HIM_RIM;
1639         writel(him_register, &ioc->chip->HostInterruptMask);
1640         ioc->mask_interrupts = 0;
1641 }
1642
1643 union reply_descriptor {
1644         u64 word;
1645         struct {
1646                 u32 low;
1647                 u32 high;
1648         } u;
1649 };
1650
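/**
 * base_mod64 - compute a 64-bit dividend modulo a 32-bit divisor
 * @dividend: 64-bit dividend
 * @divisor: 32-bit divisor
 *
 * do_div() is used instead of the '%' operator so that 32-bit
 * architectures, which lack native 64-bit division, do not need the
 * compiler's 64-bit division helpers.
 *
 * Return: remainder of dividend / divisor.
 */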
1651 static u32 base_mod64(u64 dividend, u32 divisor)
1652 {
1653         u32 remainder;
1654
1655         if (!divisor)
                pr_err("mpt3sas: %s: divisor is zero\n", __func__);
1657         remainder = do_div(dividend, divisor);
1658         return remainder;
1659 }
1660
1661 /**
1662  * _base_process_reply_queue - Process reply descriptors from reply
1663  *              descriptor post queue.
1664  * @reply_q: per IRQ's reply queue object.
1665  *
1666  * Return: number of reply descriptors processed from reply
1667  *              descriptor queue.
1668  */
1669 static int
1670 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1671 {
1672         union reply_descriptor rd;
1673         u64 completed_cmds;
1674         u8 request_descript_type;
1675         u16 smid;
1676         u8 cb_idx;
1677         u32 reply;
1678         u8 msix_index = reply_q->msix_index;
1679         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1680         Mpi2ReplyDescriptorsUnion_t *rpf;
1681         u8 rc;
1682
1683         completed_cmds = 0;
1684         if (!atomic_add_unless(&reply_q->busy, 1, 1))
1685                 return completed_cmds;
1686
1687         rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1688         request_descript_type = rpf->Default.ReplyFlags
1689              & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1690         if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1691                 atomic_dec(&reply_q->busy);
1692                 return completed_cmds;
1693         }
1694
1695         cb_idx = 0xFF;
1696         do {
1697                 rd.word = le64_to_cpu(rpf->Words);
1698                 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1699                         goto out;
1700                 reply = 0;
1701                 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1702                 if (request_descript_type ==
1703                     MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1704                     request_descript_type ==
1705                     MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1706                     request_descript_type ==
1707                     MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1708                         cb_idx = _base_get_cb_idx(ioc, smid);
1709                         if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1710                             (likely(mpt_callbacks[cb_idx] != NULL))) {
1711                                 rc = mpt_callbacks[cb_idx](ioc, smid,
1712                                     msix_index, 0);
1713                                 if (rc)
1714                                         mpt3sas_base_free_smid(ioc, smid);
1715                         }
1716                 } else if (request_descript_type ==
1717                     MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1718                         reply = le32_to_cpu(
1719                             rpf->AddressReply.ReplyFrameAddress);
1720                         if (reply > ioc->reply_dma_max_address ||
1721                             reply < ioc->reply_dma_min_address)
1722                                 reply = 0;
1723                         if (smid) {
1724                                 cb_idx = _base_get_cb_idx(ioc, smid);
1725                                 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1726                                     (likely(mpt_callbacks[cb_idx] != NULL))) {
1727                                         rc = mpt_callbacks[cb_idx](ioc, smid,
1728                                             msix_index, reply);
1729                                         if (reply)
1730                                                 _base_display_reply_info(ioc,
1731                                                     smid, msix_index, reply);
1732                                         if (rc)
1733                                                 mpt3sas_base_free_smid(ioc,
1734                                                     smid);
1735                                 }
1736                         } else {
1737                                 _base_async_event(ioc, msix_index, reply);
1738                         }
1739
1740                         /* reply free queue handling */
1741                         if (reply) {
1742                                 ioc->reply_free_host_index =
1743                                     (ioc->reply_free_host_index ==
1744                                     (ioc->reply_free_queue_depth - 1)) ?
1745                                     0 : ioc->reply_free_host_index + 1;
1746                                 ioc->reply_free[ioc->reply_free_host_index] =
1747                                     cpu_to_le32(reply);
1748                                 if (ioc->is_mcpu_endpoint)
1749                                         _base_clone_reply_to_sys_mem(ioc,
1750                                                 reply,
1751                                                 ioc->reply_free_host_index);
1752                                 writel(ioc->reply_free_host_index,
1753                                     &ioc->chip->ReplyFreeHostIndex);
1754                         }
1755                 }
1756
1757                 rpf->Words = cpu_to_le64(ULLONG_MAX);
1758                 reply_q->reply_post_host_index =
1759                     (reply_q->reply_post_host_index ==
1760                     (ioc->reply_post_queue_depth - 1)) ? 0 :
1761                     reply_q->reply_post_host_index + 1;
1762                 request_descript_type =
1763                     reply_q->reply_post_free[reply_q->reply_post_host_index].
1764                     Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1765                 completed_cmds++;
                /* Update the reply post host index after continuously
                 * processing the threshold number of Reply Descriptors,
                 * so that the firmware can find enough entries to post
                 * the Reply Descriptors in the reply descriptor post queue.
1770                  */
1771                 if (completed_cmds >= ioc->thresh_hold) {
1772                         if (ioc->combined_reply_queue) {
1773                                 writel(reply_q->reply_post_host_index |
1774                                                 ((msix_index  & 7) <<
1775                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1776                                     ioc->replyPostRegisterIndex[msix_index/8]);
1777                         } else {
1778                                 writel(reply_q->reply_post_host_index |
1779                                                 (msix_index <<
1780                                                  MPI2_RPHI_MSIX_INDEX_SHIFT),
1781                                                 &ioc->chip->ReplyPostHostIndex);
1782                         }
1783                         if (!reply_q->is_iouring_poll_q &&
1784                             !reply_q->irq_poll_scheduled) {
1785                                 reply_q->irq_poll_scheduled = true;
1786                                 irq_poll_sched(&reply_q->irqpoll);
1787                         }
1788                         atomic_dec(&reply_q->busy);
1789                         return completed_cmds;
1790                 }
1791                 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1792                         goto out;
1793                 if (!reply_q->reply_post_host_index)
1794                         rpf = reply_q->reply_post_free;
1795                 else
1796                         rpf++;
1797         } while (1);
1798
1799  out:
1800
1801         if (!completed_cmds) {
1802                 atomic_dec(&reply_q->busy);
1803                 return completed_cmds;
1804         }
1805
1806         if (ioc->is_warpdrive) {
1807                 writel(reply_q->reply_post_host_index,
1808                 ioc->reply_post_host_index[msix_index]);
1809                 atomic_dec(&reply_q->busy);
1810                 return completed_cmds;
1811         }
1812
        /* Update Reply Post Host Index.
         * For those HBAs which support the combined reply queue feature:
         * 1. Get the correct Supplemental Reply Post Host Index Register,
         *    i.e. the (msix_index / 8)th entry from the Supplemental Reply
         *    Post Host Index Register address bank replyPostRegisterIndex[].
         * 2. Then update this register with the new reply host index value
         *    in the ReplyPostIndex field, and the MSIxIndex field with
         *    msix_index reduced to a value between 0 and 7 using a modulo 8
         *    operation, since each Supplemental Reply Post Host Index
         *    Register supports 8 MSI-X vectors.
         *
         * For other HBAs just update the Reply Post Host Index register
         * with the new reply host index value in the ReplyPostIndex field
         * and msix_index in the MSIxIndex field.
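         *
         * For example (illustrative values only): msix_index 21 selects
         * replyPostRegisterIndex[21 / 8] = replyPostRegisterIndex[2], which
         * is written with MSIxIndex = 21 & 7 = 5 alongside the new host
         * index.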
1827          */
1828         if (ioc->combined_reply_queue)
1829                 writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1830                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1831                         ioc->replyPostRegisterIndex[msix_index/8]);
1832         else
1833                 writel(reply_q->reply_post_host_index | (msix_index <<
1834                         MPI2_RPHI_MSIX_INDEX_SHIFT),
1835                         &ioc->chip->ReplyPostHostIndex);
1836         atomic_dec(&reply_q->busy);
1837         return completed_cmds;
1838 }
1839
1840 /**
1841  * mpt3sas_blk_mq_poll - poll the blk mq poll queue
1842  * @shost: Scsi_Host object
1843  * @queue_num: hw ctx queue number
1844  *
 * Return: number of entries processed from the poll queue.
1846  */
1847 int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
1848 {
1849         struct MPT3SAS_ADAPTER *ioc =
1850             (struct MPT3SAS_ADAPTER *)shost->hostdata;
1851         struct adapter_reply_queue *reply_q;
1852         int num_entries = 0;
1853         int qid = queue_num - ioc->iopoll_q_start_index;
1854
1855         if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
1856             !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
1857                 return 0;
1858
1859         reply_q = ioc->io_uring_poll_queues[qid].reply_q;
1860
1861         num_entries = _base_process_reply_queue(reply_q);
1862         atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
1863
1864         return num_entries;
1865 }
1866
1867 /**
1868  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1869  * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue structure
1871  *
1872  * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1873  */
1874 static irqreturn_t
1875 _base_interrupt(int irq, void *bus_id)
1876 {
1877         struct adapter_reply_queue *reply_q = bus_id;
1878         struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1879
1880         if (ioc->mask_interrupts)
1881                 return IRQ_NONE;
1882         if (reply_q->irq_poll_scheduled)
1883                 return IRQ_HANDLED;
1884         return ((_base_process_reply_queue(reply_q) > 0) ?
1885                         IRQ_HANDLED : IRQ_NONE);
1886 }
1887
1888 /**
1889  * _base_irqpoll - IRQ poll callback handler
1890  * @irqpoll: irq_poll object
1891  * @budget: irq poll weight
1892  *
1893  * Return: number of reply descriptors processed
1894  */
1895 static int
1896 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1897 {
1898         struct adapter_reply_queue *reply_q;
1899         int num_entries = 0;
1900
1901         reply_q = container_of(irqpoll, struct adapter_reply_queue,
1902                         irqpoll);
1903         if (reply_q->irq_line_enable) {
1904                 disable_irq_nosync(reply_q->os_irq);
1905                 reply_q->irq_line_enable = false;
1906         }
1907         num_entries = _base_process_reply_queue(reply_q);
1908         if (num_entries < budget) {
1909                 irq_poll_complete(irqpoll);
1910                 reply_q->irq_poll_scheduled = false;
1911                 reply_q->irq_line_enable = true;
1912                 enable_irq(reply_q->os_irq);
1913                 /*
1914                  * Go for one more round of processing the
1915                  * reply descriptor post queue in case the HBA
1916                  * Firmware has posted some reply descriptors
1917                  * while reenabling the IRQ.
1918                  */
1919                 _base_process_reply_queue(reply_q);
1920         }
1921
1922         return num_entries;
1923 }
1924
1925 /**
 * _base_init_irqpolls - initialize IRQ polls
1927  * @ioc: per adapter object
1928  *
1929  * Return: nothing
1930  */
1931 static void
1932 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1933 {
1934         struct adapter_reply_queue *reply_q, *next;
1935
1936         if (list_empty(&ioc->reply_queue_list))
1937                 return;
1938
1939         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1940                 if (reply_q->is_iouring_poll_q)
1941                         continue;
1942                 irq_poll_init(&reply_q->irqpoll,
1943                         ioc->hba_queue_depth/4, _base_irqpoll);
1944                 reply_q->irq_poll_scheduled = false;
1945                 reply_q->irq_line_enable = true;
1946                 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1947                     reply_q->msix_index);
1948         }
1949 }
1950
1951 /**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1953  * @ioc: per adapter object
1954  *
1955  * Return: Whether or not MSI/X is enabled.
1956  */
1957 static inline int
1958 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1959 {
1960         return (ioc->facts.IOCCapabilities &
1961             MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1962 }
1963
1964 /**
1965  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1966  * @ioc: per adapter object
 * @poll: poll over reply descriptor pools in case the interrupt for a
 *              timed-out SCSI command got delayed
1969  * Context: non-ISR context
1970  *
1971  * Called when a Task Management request has completed.
1972  */
1973 void
1974 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1975 {
1976         struct adapter_reply_queue *reply_q;
1977
1978         /* If MSIX capability is turned off
1979          * then multi-queues are not enabled
1980          */
1981         if (!_base_is_controller_msix_enabled(ioc))
1982                 return;
1983
1984         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1985                 if (ioc->shost_recovery || ioc->remove_host ||
1986                                 ioc->pci_error_recovery)
1987                         return;
1988                 /* TMs are on msix_index == 0 */
1989                 if (reply_q->msix_index == 0)
1990                         continue;
1991
1992                 if (reply_q->is_iouring_poll_q) {
1993                         _base_process_reply_queue(reply_q);
1994                         continue;
1995                 }
1996
1997                 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1998                 if (reply_q->irq_poll_scheduled) {
1999                         /* Calling irq_poll_disable will wait for any pending
2000                          * callbacks to have completed.
2001                          */
2002                         irq_poll_disable(&reply_q->irqpoll);
2003                         irq_poll_enable(&reply_q->irqpoll);
2004                         /* check how the scheduled poll has ended,
2005                          * clean up only if necessary
2006                          */
2007                         if (reply_q->irq_poll_scheduled) {
2008                                 reply_q->irq_poll_scheduled = false;
2009                                 reply_q->irq_line_enable = true;
2010                                 enable_irq(reply_q->os_irq);
2011                         }
2012                 }
2013
2014                 if (poll)
2015                         _base_process_reply_queue(reply_q);
2016         }
2017 }
2018
2019 /**
2020  * mpt3sas_base_release_callback_handler - clear interrupt callback handler
2021  * @cb_idx: callback index
2022  */
2023 void
2024 mpt3sas_base_release_callback_handler(u8 cb_idx)
2025 {
2026         mpt_callbacks[cb_idx] = NULL;
2027 }
2028
2029 /**
2030  * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
2031  * @cb_func: callback function
2032  *
2033  * Return: Index of @cb_func.
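 *
 * The search runs from the top of mpt_callbacks[] down, so earlier
 * registrations get higher indexes.  A typical call (sketch; the actual
 * call sites live in the scsih/ctl/transport modules) looks like:
 *
 *   scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
 *
 * The returned index is stored with each request so that
 * _base_get_cb_idx()/_base_process_reply_queue() can dispatch
 * completions through mpt_callbacks[].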
2034  */
2035 u8
2036 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
2037 {
2038         u8 cb_idx;
2039
2040         for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
2041                 if (mpt_callbacks[cb_idx] == NULL)
2042                         break;
2043
2044         mpt_callbacks[cb_idx] = cb_func;
2045         return cb_idx;
2046 }
2047
2048 /**
2049  * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
2050  */
2051 void
2052 mpt3sas_base_initialize_callback_handler(void)
2053 {
2054         u8 cb_idx;
2055
2056         for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
2057                 mpt3sas_base_release_callback_handler(cb_idx);
2058 }
2059
2060
2061 /**
2062  * _base_build_zero_len_sge - build zero length sg entry
2063  * @ioc: per adapter object
2064  * @paddr: virtual address for SGE
2065  *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2067  * something to use if the target device goes brain dead and tries
2068  * to send data even when none is asked for.
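 *
 * With the standard mpi2.h encodings the flags byte built here is 0xd1
 * (simple element + last element + end of buffer + end of list) shifted
 * into the upper byte of FlagsLength, with a zero length; the
 * base_add_sg_single handler then ORs in the addressing flags, and the
 * -1 merely fills the unused address field.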
2069  */
2070 static void
2071 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2072 {
2073         u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
2074             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
2075             MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
2076             MPI2_SGE_FLAGS_SHIFT);
2077         ioc->base_add_sg_single(paddr, flags_length, -1);
2078 }
2079
2080 /**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
2082  * @paddr: virtual address for SGE
2083  * @flags_length: SGE flags and data transfer length
2084  * @dma_addr: Physical address
2085  */
2086 static void
2087 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2088 {
2089         Mpi2SGESimple32_t *sgel = paddr;
2090
2091         flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
2092             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2093         sgel->FlagsLength = cpu_to_le32(flags_length);
2094         sgel->Address = cpu_to_le32(dma_addr);
2095 }
2096
2097
2098 /**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
2100  * @paddr: virtual address for SGE
2101  * @flags_length: SGE flags and data transfer length
2102  * @dma_addr: Physical address
2103  */
2104 static void
2105 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2106 {
2107         Mpi2SGESimple64_t *sgel = paddr;
2108
2109         flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2110             MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2111         sgel->FlagsLength = cpu_to_le32(flags_length);
2112         sgel->Address = cpu_to_le64(dma_addr);
2113 }
2114
2115 /**
2116  * _base_get_chain_buffer_tracker - obtain chain tracker
2117  * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from the chain_lookup table, keyed by the
 * command's smid and that smid's current chain_offset.
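 *
 * Each successive call for the same command hands out the next
 * chains_per_smid[] entry and increments chain_offset; once chain_offset
 * reaches ioc->chains_needed_per_io, the pool for that smid is exhausted
 * and NULL is returned.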
2122  */
2123 static struct chain_tracker *
2124 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
2125                                struct scsi_cmnd *scmd)
2126 {
2127         struct chain_tracker *chain_req;
2128         struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2129         u16 smid = st->smid;
2130         u8 chain_offset =
2131            atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2132
2133         if (chain_offset == ioc->chains_needed_per_io)
2134                 return NULL;
2135
2136         chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
2137         atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2138         return chain_req;
2139 }
2140
2141
2142 /**
2143  * _base_build_sg - build generic sg
2144  * @ioc: per adapter object
2145  * @psge: virtual address for SGE
2146  * @data_out_dma: physical address for WRITES
2147  * @data_out_sz: data xfer size for WRITES
2148  * @data_in_dma: physical address for READS
2149  * @data_in_sz: data xfer size for READS
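 *
 * A bidirectional request yields two simple SGEs: the WRITE SGE
 * (flagged HOST_TO_IOC) first, then the READ SGE carrying the
 * end-of-buffer/end-of-list flags.  When there is no data to transfer,
 * a zero length SGE is built instead.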
2150  */
2151 static void
2152 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2153         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2154         size_t data_in_sz)
2155 {
2156         u32 sgl_flags;
2157
2158         if (!data_out_sz && !data_in_sz) {
2159                 _base_build_zero_len_sge(ioc, psge);
2160                 return;
2161         }
2162
2163         if (data_out_sz && data_in_sz) {
2164                 /* WRITE sgel first */
2165                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2166                     MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2167                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2168                 ioc->base_add_sg_single(psge, sgl_flags |
2169                     data_out_sz, data_out_dma);
2170
2171                 /* incr sgel */
2172                 psge += ioc->sge_size;
2173
2174                 /* READ sgel last */
2175                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2176                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2177                     MPI2_SGE_FLAGS_END_OF_LIST);
2178                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2179                 ioc->base_add_sg_single(psge, sgl_flags |
2180                     data_in_sz, data_in_dma);
2181         } else if (data_out_sz) /* WRITE */ {
2182                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2183                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2184                     MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2185                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2186                 ioc->base_add_sg_single(psge, sgl_flags |
2187                     data_out_sz, data_out_dma);
2188         } else if (data_in_sz) /* READ */ {
2189                 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2190                     MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2191                     MPI2_SGE_FLAGS_END_OF_LIST);
2192                 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2193                 ioc->base_add_sg_single(psge, sgl_flags |
2194                     data_in_sz, data_in_dma);
2195         }
2196 }
2197
2198 /* IEEE format sgls */
2199
2200 /**
2201  * _base_build_nvme_prp - This function is called for NVMe end devices to build
2202  *                        a native SGL (NVMe PRP).
2203  * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
2205  * @nvme_encap_request: the NVMe request msg frame pointer
2206  * @data_out_dma: physical address for WRITES
2207  * @data_out_sz: data xfer size for WRITES
2208  * @data_in_dma: physical address for READS
2209  * @data_in_sz: data xfer size for READS
2210  *
2211  * The native SGL is built starting in the first PRP
2212  * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
2213  * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
2214  * used to describe a larger data buffer.  If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
2216  * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2217  * list located elsewhere in memory to describe the remaining data memory
2218  * segments.  The PRP list will be contiguous.
2219  *
2220  * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as an SGL does.  Note
2223  * however, that this function is only used by the IOCTL call, so the memory
2224  * given will be guaranteed to be contiguous.  There is no need to translate
2225  * non-contiguous SGL into a PRP in this case.  All PRPs will describe
2226  * contiguous space that is one page size each.
2227  *
2228  * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
2229  * a PRP list pointer or a PRP element, depending upon the command.  PRP2
2230  * contains the second PRP element if the memory being described fits within 2
2231  * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2232  *
2233  * A PRP list pointer contains the address of a PRP list, structured as a linear
2234  * array of PRP entries.  Each PRP entry in this list describes a segment of
2235  * physical memory.
2236  *
2237  * Each 64-bit PRP entry comprises an address and an offset field.  The address
2238  * always points at the beginning of a 4KB physical memory page, and the offset
2239  * describes where within that 4KB page the memory segment begins.  Only the
2240  * first element in a PRP list may contain a non-zero offset, implying that all
2241  * memory segments following the first begin at the start of a 4KB page.
2242  *
2243  * Each PRP element normally describes 4KB of physical memory, with exceptions
2244  * for the first and last elements in the list.  If the memory being described
2245  * by the list begins at a non-zero offset within the first 4KB page, then the
2246  * first PRP element will contain a non-zero offset indicating where the region
2247  * begins within the 4KB page.  The last memory segment may end before the end
2248  * of the 4KB segment, depending upon the overall size of the memory being
2249  * described by the PRP list.
2250  *
2251  * Since PRP entries lack any indication of size, the overall data buffer length
2252  * is used to determine where the end of the data memory buffer is located, and
2253  * how many PRP entries are required to describe it.
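 *
 * As a worked example (illustrative numbers, assuming 4KB pages): a 9KB
 * buffer starting at offset 0x100 of its first page needs three
 * entries.  PRP1 holds the buffer address and covers the remaining
 * 0xF00 bytes of page one; since more than one page of data remains,
 * PRP2 becomes a PRP list pointer whose list holds two entries covering
 * the second page (4KB) and the final 0x500 bytes.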
2254  */
2255 static void
2256 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2257         Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2258         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2259         size_t data_in_sz)
2260 {
2261         int             prp_size = NVME_PRP_SIZE;
2262         __le64          *prp_entry, *prp1_entry, *prp2_entry;
2263         __le64          *prp_page;
2264         dma_addr_t      prp_entry_dma, prp_page_dma, dma_addr;
2265         u32             offset, entry_len;
2266         u32             page_mask_result, page_mask;
2267         size_t          length;
2268         struct mpt3sas_nvme_cmd *nvme_cmd =
2269                 (void *)nvme_encap_request->NVMe_Command;
2270
2271         /*
2272          * Not all commands require a data transfer. If no data, just return
2273          * without constructing any PRP.
2274          */
2275         if (!data_in_sz && !data_out_sz)
2276                 return;
2277         prp1_entry = &nvme_cmd->prp1;
2278         prp2_entry = &nvme_cmd->prp2;
2279         prp_entry = prp1_entry;
2280         /*
2281          * For the PRP entries, use the specially allocated buffer of
2282          * contiguous memory.
2283          */
2284         prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2285         prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2286
2287         /*
         * Check if we are within 1 entry of a page boundary; we don't
         * want our first entry to be a PRP List entry.
2290          */
2291         page_mask = ioc->page_size - 1;
2292         page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2293         if (!page_mask_result) {
2294                 /* Bump up to next page boundary. */
2295                 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2296                 prp_page_dma = prp_page_dma + prp_size;
2297         }
2298
2299         /*
2300          * Set PRP physical pointer, which initially points to the current PRP
2301          * DMA memory page.
2302          */
2303         prp_entry_dma = prp_page_dma;
2304
2305         /* Get physical address and length of the data buffer. */
2306         if (data_in_sz) {
2307                 dma_addr = data_in_dma;
2308                 length = data_in_sz;
2309         } else {
2310                 dma_addr = data_out_dma;
2311                 length = data_out_sz;
2312         }
2313
2314         /* Loop while the length is not zero. */
2315         while (length) {
2316                 /*
                 * Check if we need to put a list pointer here, i.e. if we
                 * are within prp_size (8 bytes) of a page boundary.
2319                  */
2320                 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2321                 if (!page_mask_result) {
2322                         /*
2323                          * This is the last entry in a PRP List, so we need to
2324                          * put a PRP list pointer here.  What this does is:
2325                          *   - bump the current memory pointer to the next
2326                          *     address, which will be the next full page.
2327                          *   - set the PRP Entry to point to that page.  This
2328                          *     is now the PRP List pointer.
                         *   - bump the PRP Entry pointer to the start of the
2330                          *     next page.  Since all of this PRP memory is
2331                          *     contiguous, no need to get a new page - it's
2332                          *     just the next address.
2333                          */
2334                         prp_entry_dma++;
2335                         *prp_entry = cpu_to_le64(prp_entry_dma);
2336                         prp_entry++;
2337                 }
2338
                /* Compute how much of the current page this entry can cover. */
2340                 offset = dma_addr & page_mask;
2341                 entry_len = ioc->page_size - offset;
2342
2343                 if (prp_entry == prp1_entry) {
2344                         /*
2345                          * Must fill in the first PRP pointer (PRP1) before
2346                          * moving on.
2347                          */
2348                         *prp1_entry = cpu_to_le64(dma_addr);
2349
2350                         /*
2351                          * Now point to the second PRP entry within the
2352                          * command (PRP2).
2353                          */
2354                         prp_entry = prp2_entry;
2355                 } else if (prp_entry == prp2_entry) {
2356                         /*
2357                          * Should the PRP2 entry be a PRP List pointer or just
2358                          * a regular PRP pointer?  If there is more than one
2359                          * more page of data, must use a PRP List pointer.
2360                          */
2361                         if (length > ioc->page_size) {
2362                                 /*
2363                                  * PRP2 will contain a PRP List pointer because
2364                                  * more PRP's are needed with this command. The
2365                                  * list will start at the beginning of the
2366                                  * contiguous buffer.
2367                                  */
2368                                 *prp2_entry = cpu_to_le64(prp_entry_dma);
2369
2370                                 /*
2371                                  * The next PRP Entry will be the start of the
2372                                  * first PRP List.
2373                                  */
2374                                 prp_entry = prp_page;
2375                         } else {
2376                                 /*
2377                                  * After this, the PRP Entries are complete.
2378                                  * This command uses 2 PRP's and no PRP list.
2379                                  */
2380                                 *prp2_entry = cpu_to_le64(dma_addr);
2381                         }
2382                 } else {
2383                         /*
2384                          * Put entry in list and bump the addresses.
2385                          *
2386                          * After PRP1 and PRP2 are filled in, this will fill in
2387                          * all remaining PRP entries in a PRP List, one per
2388                          * each time through the loop.
2389                          */
2390                         *prp_entry = cpu_to_le64(dma_addr);
2391                         prp_entry++;
2392                         prp_entry_dma++;
2393                 }
2394
2395                 /*
2396                  * Bump the phys address of the command's data buffer by the
2397                  * entry_len.
2398                  */
2399                 dma_addr += entry_len;
2400
2401                 /* Decrement length accounting for last partial page. */
2402                 if (entry_len > length)
2403                         length = 0;
2404                 else
2405                         length -= entry_len;
2406         }
2407 }
2408
2409 /**
2410  * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
2411  *                      SGLs specific to NVMe drives only
2412  *
2413  * @ioc:                per adapter object
2414  * @scmd:               SCSI command from the mid-layer
2415  * @mpi_request:        mpi request
 * @smid:               system request message index
 * @sge_count:          scatter gather element count.
 *
 * Return:              nothing; the PRP chain is built in place in the
 *                      contiguous PCIe buffer associated with @smid.
2421  */
2422 static void
2423 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2424                 struct scsi_cmnd *scmd,
2425                 Mpi25SCSIIORequest_t *mpi_request,
2426                 u16 smid, int sge_count)
2427 {
2428         int sge_len, num_prp_in_chain = 0;
2429         Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2430         __le64 *curr_buff;
2431         dma_addr_t msg_dma, sge_addr, offset;
2432         u32 page_mask, page_mask_result;
2433         struct scatterlist *sg_scmd;
2434         u32 first_prp_len;
2435         int data_len = scsi_bufflen(scmd);
2436         u32 nvme_pg_size;
2437
2438         nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2439         /*
         * NVMe has a very convoluted PRP format.  One PRP is required
         * for each page or partial page.  The driver needs to split up
         * OS sg_list entries if they are longer than one page or cross
         * a page boundary.  The driver also has to insert a PRP list
         * pointer entry as the last entry in each physical page of the
         * PRP list.
2445          *
2446          * NOTE: The first PRP "entry" is actually placed in the first
2447          * SGL entry in the main message as IEEE 64 format.  The 2nd
2448          * entry in the main message is the chain element, and the rest
2449          * of the PRP entries are built in the contiguous pcie buffer.
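         *
         * For example, a transfer spanning three NVMe pages ends up as:
         * SGL entry 1 = the first PRP (IEEE 64 format), SGL entry 2 =
         * the chain element pointing at the PCIe buffer, and the
         * remaining two PRPs (plus any list pointers) inside that
         * buffer.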
2450          */
2451         page_mask = nvme_pg_size - 1;
2452
2453         /*
2454          * Native SGL is needed.
2455          * Put a chain element in main message frame that points to the first
2456          * chain buffer.
2457          *
2458          * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2459          *        a native SGL.
2460          */
2461
2462         /* Set main message chain element pointer */
2463         main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2464         /*
2465          * For NVMe the chain element needs to be the 2nd SG entry in the main
2466          * message.
2467          */
2468         main_chain_element = (Mpi25IeeeSgeChain64_t *)
2469                 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2470
2471         /*
2472          * For the PRP entries, use the specially allocated buffer of
2473          * contiguous memory.  Normal chain buffers can't be used
2474          * because each chain buffer would need to be the size of an OS
2475          * page (4k).
2476          */
2477         curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2478         msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2479
2480         main_chain_element->Address = cpu_to_le64(msg_dma);
2481         main_chain_element->NextChainOffset = 0;
2482         main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2483                         MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2484                         MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2485
        /* Build the first PRP; the SGE need not be page aligned. */
2487         ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2488         sg_scmd = scsi_sglist(scmd);
2489         sge_addr = sg_dma_address(sg_scmd);
2490         sge_len = sg_dma_len(sg_scmd);
2491
2492         offset = sge_addr & page_mask;
2493         first_prp_len = nvme_pg_size - offset;
2494
2495         ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2496         ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2497
2498         data_len -= first_prp_len;
2499
2500         if (sge_len > first_prp_len) {
2501                 sge_addr += first_prp_len;
2502                 sge_len -= first_prp_len;
2503         } else if (data_len && (sge_len == first_prp_len)) {
2504                 sg_scmd = sg_next(sg_scmd);
2505                 sge_addr = sg_dma_address(sg_scmd);
2506                 sge_len = sg_dma_len(sg_scmd);
2507         }
2508
2509         for (;;) {
2510                 offset = sge_addr & page_mask;
2511
                /* Put a PRP list pointer at the page boundary. */
2513                 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2514                 if (unlikely(!page_mask_result)) {
2515                         scmd_printk(KERN_NOTICE,
2516                                 scmd, "page boundary curr_buff: 0x%p\n",
2517                                 curr_buff);
2518                         msg_dma += 8;
2519                         *curr_buff = cpu_to_le64(msg_dma);
2520                         curr_buff++;
2521                         num_prp_in_chain++;
2522                 }
2523
2524                 *curr_buff = cpu_to_le64(sge_addr);
2525                 curr_buff++;
2526                 msg_dma += 8;
2527                 num_prp_in_chain++;
2528
2529                 sge_addr += nvme_pg_size;
2530                 sge_len -= nvme_pg_size;
2531                 data_len -= nvme_pg_size;
2532
2533                 if (data_len <= 0)
2534                         break;
2535
2536                 if (sge_len > 0)
2537                         continue;
2538
2539                 sg_scmd = sg_next(sg_scmd);
2540                 sge_addr = sg_dma_address(sg_scmd);
2541                 sge_len = sg_dma_len(sg_scmd);
2542         }
2543
2544         main_chain_element->Length =
2545                 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2546         return;
2547 }
2548
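/**
 * base_is_prp_possible - check if native NVMe PRPs can be built
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: SCSI command from the mid-layer
 * @sge_count: scatter gather element count
 *
 * Return: true if a native NVMe PRP should be built; false if the
 * device is a PCIe SCSI device, or the transfer is small (data length
 * <= NVME_PRP_PAGE_SIZE * 4 with no more than 2 SGEs), in which case
 * an IEEE SGL is used instead.
 */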
2549 static bool
2550 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2551         struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2552 {
2553         u32 data_length = 0;
2554         bool build_prp = true;
2555
2556         data_length = scsi_bufflen(scmd);
2557         if (pcie_device &&
2558             (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2559                 build_prp = false;
2560                 return build_prp;
2561         }
2562
        /* If the data length is <= 16K and the number of SGEs is <= 2,
         * we build an IEEE SGL.
2565          */
2566         if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2567                 build_prp = false;
2568
2569         return build_prp;
2570 }
2571
2572 /**
2573  * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2574  * determine if the driver needs to build a native SGL.  If so, that native
2575  * SGL is built in the special contiguous buffers allocated especially for
 * PCIe SGL creation.  If the driver will not build a native SGL, it returns
 * 1 and a normal IEEE SGL will be built.  Currently this routine
2578  * supports NVMe.
2579  * @ioc: per adapter object
2580  * @mpi_request: mf request pointer
2581  * @smid: system request message index
2582  * @scmd: scsi command
2583  * @pcie_device: points to the PCIe device's info
2584  *
2585  * Return: 0 if native SGL was built, 1 if no SGL was built
2586  */
2587 static int
2588 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2589         Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2590         struct _pcie_device *pcie_device)
2591 {
2592         int sges_left;
2593
2594         /* Get the SG list pointer and info. */
2595         sges_left = scsi_dma_map(scmd);
2596         if (sges_left < 0)
2597                 return 1;
2598
2599         /* Check if we need to build a native SG list. */
2600         if (!base_is_prp_possible(ioc, pcie_device,
2601                                 scmd, sges_left)) {
                /* A native SG list is not needed; fall back to an IEEE SGL. */
2603                 goto out;
2604         }
2605
2606         /*
2607          * Build native NVMe PRP.
2608          */
2609         base_make_prp_nvme(ioc, scmd, mpi_request,
2610                         smid, sges_left);
2611
2612         return 0;
2613 out:
2614         scsi_dma_unmap(scmd);
2615         return 1;
2616 }
2617
2618 /**
2619  * _base_add_sg_single_ieee - add sg element for IEEE format
2620  * @paddr: virtual address for SGE
2621  * @flags: SGE flags
2622  * @chain_offset: number of 128 byte elements from start of segment
2623  * @length: data transfer length
2624  * @dma_addr: Physical address
2625  */
2626 static void
2627 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2628         dma_addr_t dma_addr)
2629 {
2630         Mpi25IeeeSgeChain64_t *sgel = paddr;
2631
2632         sgel->Flags = flags;
2633         sgel->NextChainOffset = chain_offset;
2634         sgel->Length = cpu_to_le32(length);
2635         sgel->Address = cpu_to_le64(dma_addr);
2636 }
2637
2638 /**
2639  * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2640  * @ioc: per adapter object
2641  * @paddr: virtual address for SGE
2642  *
 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2644  * something to use if the target device goes brain dead and tries
2645  * to send data even when none is asked for.
2646  */
2647 static void
2648 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2649 {
2650         u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2651                 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2652                 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2653
2654         _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2655 }
2656
2657 /**
 * _base_build_sg_scmd - main sg creation routine (the pcie_device
 *              argument is unused here)
2660  * @ioc: per adapter object
2661  * @scmd: scsi command
2662  * @smid: system request message index
2663  * @unused: unused pcie_device pointer
2664  * Context: none.
2665  *
2666  * The main routine that builds scatter gather table from a given
 * The main routine that builds the scatter gather table from a given
2668  *
2669  * Return: 0 success, anything else error
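 * When the sg list does not fit within ioc->max_sges_in_main_message
 * entries, the tail of the list is written into chain buffers obtained
 * from _base_get_chain_buffer_tracker(), each segment linked to the
 * previous one by a chain element.
 *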
2670  */
2671 static int
2672 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2673         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2674 {
2675         Mpi2SCSIIORequest_t *mpi_request;
2676         dma_addr_t chain_dma;
2677         struct scatterlist *sg_scmd;
2678         void *sg_local, *chain;
2679         u32 chain_offset;
2680         u32 chain_length;
2681         u32 chain_flags;
2682         int sges_left;
2683         u32 sges_in_segment;
2684         u32 sgl_flags;
2685         u32 sgl_flags_last_element;
2686         u32 sgl_flags_end_buffer;
2687         struct chain_tracker *chain_req;
2688
2689         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2690
2691         /* init scatter gather flags */
2692         sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2693         if (scmd->sc_data_direction == DMA_TO_DEVICE)
2694                 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2695         sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2696             << MPI2_SGE_FLAGS_SHIFT;
2697         sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2698             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2699             << MPI2_SGE_FLAGS_SHIFT;
2700         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2701
2702         sg_scmd = scsi_sglist(scmd);
2703         sges_left = scsi_dma_map(scmd);
2704         if (sges_left < 0)
2705                 return -ENOMEM;
2706
2707         sg_local = &mpi_request->SGL;
2708         sges_in_segment = ioc->max_sges_in_main_message;
2709         if (sges_left <= sges_in_segment)
2710                 goto fill_in_last_segment;
2711
2712         mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2713             (sges_in_segment * ioc->sge_size))/4;
2714
2715         /* fill in main message segment when there is a chain following */
2716         while (sges_in_segment) {
2717                 if (sges_in_segment == 1)
2718                         ioc->base_add_sg_single(sg_local,
2719                             sgl_flags_last_element | sg_dma_len(sg_scmd),
2720                             sg_dma_address(sg_scmd));
2721                 else
2722                         ioc->base_add_sg_single(sg_local, sgl_flags |
2723                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2724                 sg_scmd = sg_next(sg_scmd);
2725                 sg_local += ioc->sge_size;
2726                 sges_left--;
2727                 sges_in_segment--;
2728         }
2729
2730         /* initializing the chain flags and pointers */
2731         chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2732         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2733         if (!chain_req)
2734                 return -1;
2735         chain = chain_req->chain_buffer;
2736         chain_dma = chain_req->chain_buffer_dma;
2737         do {
2738                 sges_in_segment = (sges_left <=
2739                     ioc->max_sges_in_chain_message) ? sges_left :
2740                     ioc->max_sges_in_chain_message;
2741                 chain_offset = (sges_left == sges_in_segment) ?
2742                     0 : (sges_in_segment * ioc->sge_size)/4;
2743                 chain_length = sges_in_segment * ioc->sge_size;
2744                 if (chain_offset) {
2745                         chain_offset = chain_offset <<
2746                             MPI2_SGE_CHAIN_OFFSET_SHIFT;
2747                         chain_length += ioc->sge_size;
2748                 }
2749                 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2750                     chain_length, chain_dma);
2751                 sg_local = chain;
2752                 if (!chain_offset)
2753                         goto fill_in_last_segment;
2754
2755                 /* fill in chain segments */
2756                 while (sges_in_segment) {
2757                         if (sges_in_segment == 1)
2758                                 ioc->base_add_sg_single(sg_local,
2759                                     sgl_flags_last_element |
2760                                     sg_dma_len(sg_scmd),
2761                                     sg_dma_address(sg_scmd));
2762                         else
2763                                 ioc->base_add_sg_single(sg_local, sgl_flags |
2764                                     sg_dma_len(sg_scmd),
2765                                     sg_dma_address(sg_scmd));
2766                         sg_scmd = sg_next(sg_scmd);
2767                         sg_local += ioc->sge_size;
2768                         sges_left--;
2769                         sges_in_segment--;
2770                 }
2771
2772                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2773                 if (!chain_req)
2774                         return -1;
2775                 chain = chain_req->chain_buffer;
2776                 chain_dma = chain_req->chain_buffer_dma;
2777         } while (1);
2778
2779
2780  fill_in_last_segment:
2781
2782         /* fill the last segment */
2783         while (sges_left) {
2784                 if (sges_left == 1)
2785                         ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2786                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2787                 else
2788                         ioc->base_add_sg_single(sg_local, sgl_flags |
2789                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2790                 sg_scmd = sg_next(sg_scmd);
2791                 sg_local += ioc->sge_size;
2792                 sges_left--;
2793         }
2794
2795         return 0;
2796 }
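
/*
 * Worked example of the ChainOffset arithmetic above, with illustrative
 * numbers (a hypothetical frame layout, not taken from the headers): if
 * the SGL began 64 bytes into the request frame and 8 simple 64-bit MPI2
 * SGEs (12 bytes each, matching the sge_size chosen in
 * _base_config_dma_addressing() below) sat ahead of the chain element,
 * then
 *
 *   ChainOffset = (64 + 8 * 12) / 4 = 40
 *
 * i.e. the chain element sits 40 32-bit words into the frame, which is
 * why the expression divides by 4.
 */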
2797
2798 /**
2799  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2800  * @ioc: per adapter object
2801  * @scmd: scsi command
2802  * @smid: system request message index
2803  * @pcie_device: Pointer to pcie_device. If set, a PCIe native SGL will be
2804  * constructed when needed.
2805  * Context: none.
2806  *
2807  * The main routine that builds scatter gather table from a given
2808  * scsi request sent via the .queuecommand main handler.
2809  *
2810  * Return: 0 on success, anything else on error
2811  */
2812 static int
2813 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2814         struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2815 {
2816         Mpi25SCSIIORequest_t *mpi_request;
2817         dma_addr_t chain_dma;
2818         struct scatterlist *sg_scmd;
2819         void *sg_local, *chain;
2820         u32 chain_offset;
2821         u32 chain_length;
2822         int sges_left;
2823         u32 sges_in_segment;
2824         u8 simple_sgl_flags;
2825         u8 simple_sgl_flags_last;
2826         u8 chain_sgl_flags;
2827         struct chain_tracker *chain_req;
2828
2829         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2830
2831         /* init scatter gather flags */
2832         simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2833             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2834         simple_sgl_flags_last = simple_sgl_flags |
2835             MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2836         chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2837             MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2838
2839         /* Check if we need to build a native SG list. */
2840         if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2841                         smid, scmd, pcie_device) == 0)) {
2842                 /* We built a native SG list, just return. */
2843                 return 0;
2844         }
2845
2846         sg_scmd = scsi_sglist(scmd);
2847         sges_left = scsi_dma_map(scmd);
2848         if (sges_left < 0)
2849                 return -ENOMEM;
2850
2851         sg_local = &mpi_request->SGL;
2852         sges_in_segment = (ioc->request_sz -
2853                    offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2854         if (sges_left <= sges_in_segment)
2855                 goto fill_in_last_segment;
2856
2857         mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2858             (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2859
2860         /* fill in main message segment when there is a chain following */
2861         while (sges_in_segment > 1) {
2862                 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2863                     sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2864                 sg_scmd = sg_next(sg_scmd);
2865                 sg_local += ioc->sge_size_ieee;
2866                 sges_left--;
2867                 sges_in_segment--;
2868         }
2869
2870         /* initializing the pointers */
2871         chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2872         if (!chain_req)
2873                 return -1;
2874         chain = chain_req->chain_buffer;
2875         chain_dma = chain_req->chain_buffer_dma;
2876         do {
2877                 sges_in_segment = (sges_left <=
2878                     ioc->max_sges_in_chain_message) ? sges_left :
2879                     ioc->max_sges_in_chain_message;
2880                 chain_offset = (sges_left == sges_in_segment) ?
2881                     0 : sges_in_segment;
2882                 chain_length = sges_in_segment * ioc->sge_size_ieee;
2883                 if (chain_offset)
2884                         chain_length += ioc->sge_size_ieee;
2885                 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2886                     chain_offset, chain_length, chain_dma);
2887
2888                 sg_local = chain;
2889                 if (!chain_offset)
2890                         goto fill_in_last_segment;
2891
2892                 /* fill in chain segments */
2893                 while (sges_in_segment) {
2894                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2895                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2896                         sg_scmd = sg_next(sg_scmd);
2897                         sg_local += ioc->sge_size_ieee;
2898                         sges_left--;
2899                         sges_in_segment--;
2900                 }
2901
2902                 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2903                 if (!chain_req)
2904                         return -1;
2905                 chain = chain_req->chain_buffer;
2906                 chain_dma = chain_req->chain_buffer_dma;
2907         } while (1);
2908
2909
2910  fill_in_last_segment:
2911
2912         /* fill the last segment */
2913         while (sges_left > 0) {
2914                 if (sges_left == 1)
2915                         _base_add_sg_single_ieee(sg_local,
2916                             simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2917                             sg_dma_address(sg_scmd));
2918                 else
2919                         _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2920                             sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2921                 sg_scmd = sg_next(sg_scmd);
2922                 sg_local += ioc->sge_size_ieee;
2923                 sges_left--;
2924         }
2925
2926         return 0;
2927 }
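
/*
 * Worked example of the IEEE ChainOffset arithmetic above, again with
 * illustrative numbers: unlike the MPI2 path, ChainOffset here counts
 * 16-byte IEEE SGE slots rather than 32-bit words. If the SGL began,
 * say, 96 bytes into the request frame (96 / 16 = 6 slots) and
 * sges_in_segment were 8, then
 *
 *   ChainOffset = (8 - 1) + 6 = 13
 *
 * so the chain element occupies the 14th SGE-sized slot of the frame.
 */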
2928
2929 /**
2930  * _base_build_sg_ieee - build generic sg for IEEE format
2931  * @ioc: per adapter object
2932  * @psge: virtual address for SGE
2933  * @data_out_dma: physical address for WRITES
2934  * @data_out_sz: data xfer size for WRITES
2935  * @data_in_dma: physical address for READS
2936  * @data_in_sz: data xfer size for READS
2937  */
2938 static void
2939 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2940         dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2941         size_t data_in_sz)
2942 {
2943         u8 sgl_flags;
2944
2945         if (!data_out_sz && !data_in_sz) {
2946                 _base_build_zero_len_sge_ieee(ioc, psge);
2947                 return;
2948         }
2949
2950         if (data_out_sz && data_in_sz) {
2951                 /* WRITE sgel first */
2952                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2953                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2954                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2955                     data_out_dma);
2956
2957                 /* incr sgel */
2958                 psge += ioc->sge_size_ieee;
2959
2960                 /* READ sgel last */
2961                 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2962                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2963                     data_in_dma);
2964         } else if (data_out_sz) /* WRITE */ {
2965                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2966                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2967                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2968                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2969                     data_out_dma);
2970         } else if (data_in_sz) /* READ */ {
2971                 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2972                     MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2973                     MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2974                 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2975                     data_in_dma);
2976         }
2977 }
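
/*
 * Illustrative use of _base_build_sg_ieee() (hypothetical sizes and DMA
 * handles, shown only to make the three branches above concrete):
 *
 *   _base_build_sg_ieee(ioc, psge, hdr_dma, 16, page_dma, 256);
 *
 * Both sizes being non-zero takes the bidirectional branch: a simple
 * WRITE SGE for the 16-byte buffer is laid down first, followed by a
 * READ SGE for the 256-byte buffer carrying the END_OF_LIST flag.
 */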
2978
2979 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2980
2981 /**
2982  * _base_config_dma_addressing - set dma addressing
2983  * @ioc: per adapter object
2984  * @pdev: PCI device struct
2985  *
2986  * Return: 0 for success, non-zero for failure.
2987  */
2988 static int
2989 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2990 {
2991         struct sysinfo s;
2992         u64 coherent_dma_mask, dma_mask;
2993
2994         if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
2995                 ioc->dma_mask = 32;
2996                 coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
2997         /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2998         } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
2999                 ioc->dma_mask = 63;
3000                 coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
3001         } else {
3002                 ioc->dma_mask = 64;
3003                 coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
3004         }
3005
3006         if (ioc->use_32bit_dma)
3007                 coherent_dma_mask = DMA_BIT_MASK(32);
3008
3009         if (dma_set_mask(&pdev->dev, dma_mask) ||
3010             dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
3011                 return -ENODEV;
3012
3013         if (ioc->dma_mask > 32) {
3014                 ioc->base_add_sg_single = &_base_add_sg_single_64;
3015                 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
3016         } else {
3017                 ioc->base_add_sg_single = &_base_add_sg_single_32;
3018                 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
3019         }
3020
3021         si_meminfo(&s);
3022         ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
3023                 ioc->dma_mask, convert_to_kb(s.totalram));
3024
3025         return 0;
3026 }
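
/*
 * For reference, DMA_BIT_MASK(n) expands to ((1ULL << n) - 1) for n < 64
 * (and to all-ones for n == 64), so the masks chosen above are:
 *
 *   DMA_BIT_MASK(32) = 0x00000000FFFFFFFF
 *   DMA_BIT_MASK(63) = 0x7FFFFFFFFFFFFFFF
 *   DMA_BIT_MASK(64) = 0xFFFFFFFFFFFFFFFF
 *
 * The 63-bit mask used for SAS3/SAS3.5 parts keeps the top address bit
 * clear while still covering the full practical range of host memory.
 */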
3027
3028 /**
3029  * _base_check_enable_msix - check whether the controller is MSI-X capable
3030  * @ioc: per adapter object
3031  *
3032  * Check to see if the card is capable of MSI-X, and set the number
3033  * of available MSI-X vectors.
3034  */
3035 static int
3036 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3037 {
3038         int base;
3039         u16 message_control;
3040
3041         /* Check whether this is a SAS2008 B0 controller;
3042          * if so, use IO-APIC instead of MSI-X.
3043          */
3044         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
3045             ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
3046                 return -EINVAL;
3047         }
3048
3049         base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
3050         if (!base) {
3051                 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
3052                 return -EINVAL;
3053         }
3054
3055         /* get msix vector count */
3056         /* NUMA_IO not supported for older controllers */
3057         if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
3058             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
3059             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
3060             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
3061             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
3062             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
3063             ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
3064                 ioc->msix_vector_count = 1;
3065         else {
3066                 pci_read_config_word(ioc->pdev, base + 2, &message_control);
3067                 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
3068         }
3069         dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
3070                                   ioc->msix_vector_count));
3071         return 0;
3072 }
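
/*
 * Worked example of the Message Control decode above: the MSI-X Table
 * Size field is encoded as N-1, so a message_control value of 0x003F
 * yields (0x003F & 0x3FF) + 1 = 64 vectors. The 0x3FF mask caps the
 * count at 1024, comfortably above the 128 vectors the Gen3.5 parts
 * expose (MPT3SAS_GEN35_MAX_MSIX_QUEUES).
 */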
3073
3074 /**
3075  * mpt3sas_base_free_irq - free irq
3076  * @ioc: per adapter object
3077  *
3078  * Frees each reply_queue and removes it from the list.
3079  */
3080 void
3081 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
3082 {
3083         unsigned int irq;
3084         struct adapter_reply_queue *reply_q, *next;
3085
3086         if (list_empty(&ioc->reply_queue_list))
3087                 return;
3088
3089         list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
3090                 list_del(&reply_q->list);
3091                 if (reply_q->is_iouring_poll_q) {
3092                         kfree(reply_q);
3093                         continue;
3094                 }
3095
3096                 if (ioc->smp_affinity_enable) {
3097                         irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
3098                         irq_update_affinity_hint(irq, NULL);
3099                 }
3100                 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3101                          reply_q);
3102                 kfree(reply_q);
3103         }
3104 }
3105
3106 /**
3107  * _base_request_irq - request irq
3108  * @ioc: per adapter object
3109  * @index: msix index into vector table
3110  *
3111  * Inserts the respective reply_queue into the list.
3112  */
3113 static int
3114 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3115 {
3116         struct pci_dev *pdev = ioc->pdev;
3117         struct adapter_reply_queue *reply_q;
3118         int r, qid;
3119
3120         reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
3121         if (!reply_q) {
3122                 ioc_err(ioc, "unable to allocate memory %zu!\n",
3123                         sizeof(struct adapter_reply_queue));
3124                 return -ENOMEM;
3125         }
3126         reply_q->ioc = ioc;
3127         reply_q->msix_index = index;
3128
3129         atomic_set(&reply_q->busy, 0);
3130
3131         if (index >= ioc->iopoll_q_start_index) {
3132                 qid = index - ioc->iopoll_q_start_index;
3133                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
3134                     ioc->driver_name, ioc->id, qid);
3135                 reply_q->is_iouring_poll_q = 1;
3136                 ioc->io_uring_poll_queues[qid].reply_q = reply_q;
3137                 goto out;
3138         }
3139
3140
3141         if (ioc->msix_enable)
3142                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
3143                     ioc->driver_name, ioc->id, index);
3144         else
3145                 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
3146                     ioc->driver_name, ioc->id);
3147         r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
3148                         IRQF_SHARED, reply_q->name, reply_q);
3149         if (r) {
3150                 pr_err("%s: unable to allocate interrupt %d!\n",
3151                        reply_q->name, pci_irq_vector(pdev, index));
3152                 kfree(reply_q);
3153                 return -EBUSY;
3154         }
3155 out:
3156         INIT_LIST_HEAD(&reply_q->list);
3157         list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3158         return 0;
3159 }
3160
3161 /**
3162  * _base_assign_reply_queues - assign an msix index to each cpu
3163  * @ioc: per adapter object
3164  *
3165  * The end user would need to set the affinity via /proc/irq/#/smp_affinity
3166  */
3167 static void
3168 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3169 {
3170         unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
3171         struct adapter_reply_queue *reply_q;
3172         int iopoll_q_count = ioc->reply_queue_count -
3173             ioc->iopoll_q_start_index;
3174         const struct cpumask *mask;
3175
3176         if (!_base_is_controller_msix_enabled(ioc))
3177                 return;
3178
3179         if (ioc->msix_load_balance)
3180                 return;
3181
3182         memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3183
3184         nr_cpus = num_online_cpus();
3185         nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3186                                                ioc->facts.MaxMSIxVectors);
3187         if (!nr_msix)
3188                 return;
3189
3190         if (ioc->smp_affinity_enable) {
3191
3192                 /*
3193                  * set irq affinity to local numa node for those irqs
3194                  * corresponding to high iops queues.
3195                  */
3196                 if (ioc->high_iops_queues) {
3197                         mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
3198                         for (index = 0; index < ioc->high_iops_queues;
3199                             index++) {
3200                                 irq = pci_irq_vector(ioc->pdev, index);
3201                                 irq_set_affinity_and_hint(irq, mask);
3202                         }
3203                 }
3204
3205                 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3206                         const cpumask_t *mask;
3207
3208                         if (reply_q->msix_index < ioc->high_iops_queues ||
3209                             reply_q->msix_index >= ioc->iopoll_q_start_index)
3210                                 continue;
3211
3212                         mask = pci_irq_get_affinity(ioc->pdev,
3213                             reply_q->msix_index);
3214                         if (!mask) {
3215                                 ioc_warn(ioc, "no affinity for msi %x\n",
3216                                          reply_q->msix_index);
3217                                 goto fall_back;
3218                         }
3219
3220                         for_each_cpu_and(cpu, mask, cpu_online_mask) {
3221                                 if (cpu >= ioc->cpu_msix_table_sz)
3222                                         break;
3223                                 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3224                         }
3225                 }
3226                 return;
3227         }
3228
3229 fall_back:
3230         cpu = cpumask_first(cpu_online_mask);
3231         nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
3232         index = 0;
3233
3234         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3235                 unsigned int i, group = nr_cpus / nr_msix;
3236
3237                 if (reply_q->msix_index < ioc->high_iops_queues ||
3238                     reply_q->msix_index >= ioc->iopoll_q_start_index)
3239                         continue;
3240
3241                 if (cpu >= nr_cpus)
3242                         break;
3243
3244                 if (index < nr_cpus % nr_msix)
3245                         group++;
3246
3247                 for (i = 0 ; i < group ; i++) {
3248                         ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3249                         cpu = cpumask_next(cpu, cpu_online_mask);
3250                 }
3251                 index++;
3252         }
3253 }
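
/*
 * Worked example of the fallback distribution above: with nr_cpus = 8
 * online CPUs and nr_msix = 3 usable reply queues, group = 8 / 3 = 2 and
 * 8 % 3 = 2, so the first two queues each take 2 + 1 = 3 CPUs and the
 * third takes 2, covering all 8 CPUs: {0,1,2} {3,4,5} {6,7}.
 */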
3254
3255 /**
3256  * _base_check_and_enable_high_iops_queues - enable high iops mode
3257  * @ioc: per adapter object
3258  * @hba_msix_vector_count: msix vectors supported by HBA
3259  *
3260  * Enable high iops queues only if
3261  *  - the HBA is a SEA/AERO controller,
3262  *  - the HBA supports 128 MSI-X vectors,
3263  *  - the total CPU count in the system is >= 16,
3264  *  - the driver is loaded with the default max_msix_vectors module parameter, and
3265  *  - the system booted in non-kdump mode.
3266  *
3267  * Return: nothing.
3268  */
3269 static void
3270 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3271                 int hba_msix_vector_count)
3272 {
3273         u16 lnksta, speed;
3274
3275         /*
3276          * Disable high iops queues if io uring poll queues are enabled.
3277          */
3278         if (perf_mode == MPT_PERF_MODE_IOPS ||
3279             perf_mode == MPT_PERF_MODE_LATENCY ||
3280             ioc->io_uring_poll_queues) {
3281                 ioc->high_iops_queues = 0;
3282                 return;
3283         }
3284
3285         if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3286
3287                 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3288                 speed = lnksta & PCI_EXP_LNKSTA_CLS;
3289
3290                 if (speed < 0x4) {
3291                         ioc->high_iops_queues = 0;
3292                         return;
3293                 }
3294         }
3295
3296         if (!reset_devices && ioc->is_aero_ioc &&
3297             hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3298             num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3299             max_msix_vectors == -1)
3300                 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3301         else
3302                 ioc->high_iops_queues = 0;
3303 }
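
/*
 * Note on the link-speed check above: PCI_EXP_LNKSTA_CLS holds the
 * current link speed as an encoded value (1 = 2.5 GT/s, 2 = 5 GT/s,
 * 3 = 8 GT/s, 4 = 16 GT/s), so "speed < 0x4" rejects anything slower
 * than a PCIe Gen4 link when the driver is in the default perf mode.
 */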
3304
3305 /**
3306  * mpt3sas_base_disable_msix - disables msix
3307  * @ioc: per adapter object
3308  *
3309  */
3310 void
3311 mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3312 {
3313         if (!ioc->msix_enable)
3314                 return;
3315         pci_free_irq_vectors(ioc->pdev);
3316         ioc->msix_enable = 0;
3317         kfree(ioc->io_uring_poll_queues);
3318 }
3319
3320 /**
3321  * _base_alloc_irq_vectors - allocate msix vectors
3322  * @ioc: per adapter object
3323  *
3324  */
3325 static int
3326 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3327 {
3328         int i, irq_flags = PCI_IRQ_MSIX;
3329         struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3330         struct irq_affinity *descp = &desc;
3331         /*
3332          * Don't allocate msix vectors for poll_queues.
3333          * msix_vectors always stays within the range of FW-supported reply queues.
3334          */
3335         int nr_msix_vectors = ioc->iopoll_q_start_index;
3336
3337
3338         if (ioc->smp_affinity_enable)
3339                 irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
3340         else
3341                 descp = NULL;
3342
3343         ioc_info(ioc, "high_iops_queues: %d, reply_queue_count: %d, nr_msix_vectors: %d\n",
3344             ioc->high_iops_queues, ioc->reply_queue_count, nr_msix_vectors);
3345
3346         i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3347             ioc->high_iops_queues,
3348             nr_msix_vectors, irq_flags, descp);
3349
3350         return i;
3351 }
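
/*
 * Note on .pre_vectors above: the irq_affinity descriptor tells the PCI
 * core to leave the first ioc->high_iops_queues vectors out of the
 * automatic affinity spread; those vectors keep default affinity and are
 * instead pinned to the HBA's local NUMA node in
 * _base_assign_reply_queues().
 */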
3352
3353 /**
3354  * _base_enable_msix - enable msix, falling back to io_apic on failure
3355  * @ioc: per adapter object
3356  *
3357  */
3358 static int
3359 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3360 {
3361         int r;
3362         int i, local_max_msix_vectors;
3363         u8 try_msix = 0;
3364         int iopoll_q_count = 0;
3365
3366         ioc->msix_load_balance = false;
3367
3368         if (msix_disable == -1 || msix_disable == 0)
3369                 try_msix = 1;
3370
3371         if (!try_msix)
3372                 goto try_ioapic;
3373
3374         if (_base_check_enable_msix(ioc) != 0)
3375                 goto try_ioapic;
3376
3377         ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3378         pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3379                 ioc->cpu_count, max_msix_vectors);
3380
3381         ioc->reply_queue_count =
3382                 min_t(int, ioc->cpu_count, ioc->msix_vector_count);
3383
3384         if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3385                 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3386         else
3387                 local_max_msix_vectors = max_msix_vectors;
3388
3389         if (local_max_msix_vectors == 0)
3390                 goto try_ioapic;
3391
3392         /*
3393          * Enable msix_load_balance only if combined reply queue mode is
3394          * disabled on SAS3 & above generation HBA devices.
3395          */
3396         if (!ioc->combined_reply_queue &&
3397             ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3398                 ioc_info(ioc,
3399                     "combined ReplyQueue is off, Enabling msix load balance\n");
3400                 ioc->msix_load_balance = true;
3401         }
3402
3403         /*
3404          * smp affinity setting is not needed when msix load balance
3405          * is enabled.
3406          */
3407         if (ioc->msix_load_balance)
3408                 ioc->smp_affinity_enable = 0;
3409
3410         if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
3411                 ioc->shost->host_tagset = 0;
3412
3413         /*
3414          * Enable io uring poll queues only if host_tagset is enabled.
3415          */
3416         if (ioc->shost->host_tagset)
3417                 iopoll_q_count = poll_queues;
3418
3419         if (iopoll_q_count) {
3420                 ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
3421                     sizeof(struct io_uring_poll_queue), GFP_KERNEL);
3422                 if (!ioc->io_uring_poll_queues)
3423                         iopoll_q_count = 0;
3424         }
3425
3426         if (ioc->is_aero_ioc)
3427                 _base_check_and_enable_high_iops_queues(ioc,
3428                     ioc->msix_vector_count);
3429
3430         /*
3431          * Add high iops queues count to reply queue count if high iops queues
3432          * are enabled.
3433          */
3434         ioc->reply_queue_count = min_t(int,
3435             ioc->reply_queue_count + ioc->high_iops_queues,
3436             ioc->msix_vector_count);
3437
3438         /*
3439          * Adjust the reply queue count in case the reply queue count
3440          * exceeds the user-provided MSI-X vector count.
3441          */
3442         if (local_max_msix_vectors > 0)
3443                 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3444                     ioc->reply_queue_count);
3445         /*
3446          * Add io uring poll queues count to reply queues count
3447          * if io uring is enabled in driver.
3448          */
3449         if (iopoll_q_count) {
3450                 if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
3451                         iopoll_q_count = 0;
3452                 ioc->reply_queue_count = min_t(int,
3453                     ioc->reply_queue_count + iopoll_q_count,
3454                     ioc->msix_vector_count);
3455         }
3456
3457         /*
3458          * Starting index of io uring poll queues in reply queue list.
3459          */
3460         ioc->iopoll_q_start_index =
3461             ioc->reply_queue_count - iopoll_q_count;
3462
3463         r = _base_alloc_irq_vectors(ioc);
3464         if (r < 0) {
3465                 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3466                 goto try_ioapic;
3467         }
3468
3469         /*
3470          * Adjust the reply queue count if the number of allocated
3471          * MSI-X vectors is less than the requested
3472          * number of MSI-X vectors.
3473          */
3474         if (r < ioc->iopoll_q_start_index) {
3475                 ioc->reply_queue_count = r + iopoll_q_count;
3476                 ioc->iopoll_q_start_index =
3477                     ioc->reply_queue_count - iopoll_q_count;
3478         }
3479
3480         ioc->msix_enable = 1;
3481         for (i = 0; i < ioc->reply_queue_count; i++) {
3482                 r = _base_request_irq(ioc, i);
3483                 if (r) {
3484                         mpt3sas_base_free_irq(ioc);
3485                         mpt3sas_base_disable_msix(ioc);
3486                         goto try_ioapic;
3487                 }
3488         }
3489
3490         ioc_info(ioc, "High IOPs queues : %s\n",
3491                         ioc->high_iops_queues ? "enabled" : "disabled");
3492
3493         return 0;
3494
3495 /* fall back to io_apic interrupt routing */
3496  try_ioapic:
3497         ioc->high_iops_queues = 0;
3498         ioc_info(ioc, "High IOPs queues : disabled\n");
3499         ioc->reply_queue_count = 1;
3500         ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; /* no io poll queues here */
3501         r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3502         if (r < 0) {
3503                 dfailprintk(ioc,
3504                             ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3505                                      r));
3506         } else
3507                 r = _base_request_irq(ioc, 0);
3508
3509         return r;
3510 }
3511
3512 /**
3513  * mpt3sas_base_unmap_resources - free controller resources
3514  * @ioc: per adapter object
3515  */
3516 static void
3517 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3518 {
3519         struct pci_dev *pdev = ioc->pdev;
3520
3521         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3522
3523         mpt3sas_base_free_irq(ioc);
3524         mpt3sas_base_disable_msix(ioc);
3525
3526         kfree(ioc->replyPostRegisterIndex);
3527         ioc->replyPostRegisterIndex = NULL;
3528
3529
3530         if (ioc->chip_phys) {
3531                 iounmap(ioc->chip);
3532                 ioc->chip_phys = 0;
3533         }
3534
3535         if (pci_is_enabled(pdev)) {
3536                 pci_release_selected_regions(ioc->pdev, ioc->bars);
3537                 pci_disable_device(pdev);
3538         }
3539 }
3540
3541 static int
3542 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3543
3544 /**
3545  * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
3546  *     and if it is in fault state then issue diag reset.
3547  * @ioc: per adapter object
3548  *
3549  * Return: 0 for success, non-zero for failure.
3550  */
3551 int
3552 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3553 {
3554         u32 ioc_state;
3555         int rc = -EFAULT;
3556
3557         dinitprintk(ioc, pr_info("%s\n", __func__));
3558         if (ioc->pci_error_recovery)
3559                 return 0;
3560         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3561         dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3562
3563         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3564                 mpt3sas_print_fault_code(ioc, ioc_state &
3565                     MPI2_DOORBELL_DATA_MASK);
3566                 mpt3sas_base_mask_interrupts(ioc);
3567                 rc = _base_diag_reset(ioc);
3568         } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3569             MPI2_IOC_STATE_COREDUMP) {
3570                 mpt3sas_print_coredump_info(ioc, ioc_state &
3571                      MPI2_DOORBELL_DATA_MASK);
3572                 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3573                 mpt3sas_base_mask_interrupts(ioc);
3574                 rc = _base_diag_reset(ioc);
3575         }
3576
3577         return rc;
3578 }
3579
3580 /**
3581  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3582  * @ioc: per adapter object
3583  *
3584  * Return: 0 for success, non-zero for failure.
3585  */
3586 int
3587 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3588 {
3589         struct pci_dev *pdev = ioc->pdev;
3590         u32 memap_sz;
3591         u32 pio_sz;
3592         int i, r = 0, rc;
3593         u64 pio_chip = 0;
3594         phys_addr_t chip_phys = 0;
3595         struct adapter_reply_queue *reply_q;
3596         int iopoll_q_count = 0;
3597
3598         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3599
3600         ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3601         if (pci_enable_device_mem(pdev)) {
3602                 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3603                 ioc->bars = 0;
3604                 return -ENODEV;
3605         }
3606
3607
3608         if (pci_request_selected_regions(pdev, ioc->bars,
3609             ioc->driver_name)) {
3610                 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3611                 ioc->bars = 0;
3612                 r = -ENODEV;
3613                 goto out_fail;
3614         }
3615
3616         pci_set_master(pdev);
3617
3618
3619         if (_base_config_dma_addressing(ioc, pdev) != 0) {
3620                 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3621                 r = -ENODEV;
3622                 goto out_fail;
3623         }
3624
3625         for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3626              (!memap_sz || !pio_sz); i++) {
3627                 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3628                         if (pio_sz)
3629                                 continue;
3630                         pio_chip = (u64)pci_resource_start(pdev, i);
3631                         pio_sz = pci_resource_len(pdev, i);
3632                 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3633                         if (memap_sz)
3634                                 continue;
3635                         ioc->chip_phys = pci_resource_start(pdev, i);
3636                         chip_phys = ioc->chip_phys;
3637                         memap_sz = pci_resource_len(pdev, i);
3638                         ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3639                 }
3640         }
3641
3642         if (ioc->chip == NULL) {
3643                 ioc_err(ioc,
3644                     "unable to map adapter memory! or resource not found\n");
3645                 r = -EINVAL;
3646                 goto out_fail;
3647         }
3648
3649         mpt3sas_base_mask_interrupts(ioc);
3650
3651         r = _base_get_ioc_facts(ioc);
3652         if (r) {
3653                 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
3654                 if (rc || (_base_get_ioc_facts(ioc)))
3655                         goto out_fail;
3656         }
3657
3658         if (!ioc->rdpq_array_enable_assigned) {
3659                 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3660                 ioc->rdpq_array_enable_assigned = 1;
3661         }
3662
3663         r = _base_enable_msix(ioc);
3664         if (r)
3665                 goto out_fail;
3666
3667         iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
3668         for (i = 0; i < iopoll_q_count; i++) {
3669                 atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
3670                 atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
3671         }
3672
3673         if (!ioc->is_driver_loading)
3674                 _base_init_irqpolls(ioc);
3675         /* Use the Combined reply queue feature only for SAS3 C0 & higher
3676          * revision HBAs and also only when reply queue count is greater than 8
3677          */
3678         if (ioc->combined_reply_queue) {
3679                 /* Determine the Supplemental Reply Post Host Index Register
3680                  * addresses. The Supplemental Reply Post Host Index Registers
3681                  * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3682                  * each register is offset by
3683                  * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
3684                  */
3685                 ioc->replyPostRegisterIndex = kcalloc(
3686                      ioc->combined_reply_index_count,
3687                      sizeof(resource_size_t *), GFP_KERNEL);
3688                 if (!ioc->replyPostRegisterIndex) {
3689                         ioc_err(ioc,
3690                             "allocation for replyPostRegisterIndex failed!\n");
3691                         r = -ENOMEM;
3692                         goto out_fail;
3693                 }
3694
3695                 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3696                         ioc->replyPostRegisterIndex[i] =
3697                                 (resource_size_t __iomem *)
3698                                 ((u8 __force *)&ioc->chip->Doorbell +
3699                                  MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3700                                  (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3701                 }
3702         }
3703
3704         if (ioc->is_warpdrive) {
3705                 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3706                     &ioc->chip->ReplyPostHostIndex;
3707
3708                 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3709                         ioc->reply_post_host_index[i] =
3710                         (resource_size_t __iomem *)
3711                         ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3712                         * 4)));
3713         }
3714
3715         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3716                 if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
3717                         pr_info("%s: enabled: index: %d\n",
3718                             reply_q->name, reply_q->msix_index);
3719                         continue;
3720                 }
3721
3722                 pr_info("%s: %s enabled: IRQ %d\n",
3723                         reply_q->name,
3724                         ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3725                         pci_irq_vector(ioc->pdev, reply_q->msix_index));
3726         }
3727
3728         ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3729                  &chip_phys, ioc->chip, memap_sz);
3730         ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3731                  (unsigned long long)pio_chip, pio_sz);
3732
3733         /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3734         pci_save_state(pdev);
3735         return 0;
3736
3737  out_fail:
3738         mpt3sas_base_unmap_resources(ioc);
3739         return r;
3740 }
3741
3742 /**
3743  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3744  * @ioc: per adapter object
3745  * @smid: system request message index (smid zero is invalid)
3746  *
3747  * Return: virt pointer to message frame.
3748  */
3749 void *
3750 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3751 {
3752         return (void *)(ioc->request + (smid * ioc->request_sz));
3753 }
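
/*
 * Worked example of the frame lookup above (illustrative request_sz):
 * with ioc->request_sz = 128 bytes and smid = 3, the returned pointer is
 * ioc->request + 3 * 128 = ioc->request + 384. Because smid 0 is
 * invalid, frame 0 is never handed out to a caller.
 */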
3754
3755 /**
3756  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3757  * @ioc: per adapter object
3758  * @smid: system request message index
3759  *
3760  * Return: virt pointer to sense buffer.
3761  */
3762 void *
3763 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3764 {
3765         return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3766 }
3767
3768 /**
3769  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3770  * @ioc: per adapter object
3771  * @smid: system request message index
3772  *
3773  * Return: phys pointer to the low 32-bit address of the sense buffer.
3774  */
3775 __le32
3776 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3777 {
3778         return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3779             SCSI_SENSE_BUFFERSIZE));
3780 }
3781
3782 /**
3783  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3784  * @ioc: per adapter object
3785  * @smid: system request message index
3786  *
3787  * Return: virt pointer to a PCIe SGL.
3788  */
3789 void *
3790 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3791 {
3792         return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3793 }
3794
3795 /**
3796  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3797  * @ioc: per adapter object
3798  * @smid: system request message index
3799  *
3800  * Return: phys pointer to the address of the PCIe buffer.
3801  */
3802 dma_addr_t
3803 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3804 {
3805         return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3806 }
3807
3808 /**
3809  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3810  * @ioc: per adapter object
3811  * @phys_addr: lower 32-bit physical addr of the reply
3812  *
3813  * Converts the lower 32-bit physical addr into a virt address.
3814  */
3815 void *
3816 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3817 {
3818         if (!phys_addr)
3819                 return NULL;
3820         return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3821 }
3822
3823 /**
3824  * _base_get_msix_index - get the msix index
3825  * @ioc: per adapter object
3826  * @scmd: scsi_cmnd object
3827  *
3828  * Return: msix index of general reply queues,
3829  * i.e. reply queue on which IO request's reply
3830  * should be posted by the HBA firmware.
3831  */
3832 static inline u8
3833 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3834         struct scsi_cmnd *scmd)
3835 {
3836         /* Enables reply_queue load balancing */
3837         if (ioc->msix_load_balance)
3838                 return ioc->reply_queue_count ?
3839                     base_mod64(atomic64_add_return(1,
3840                     &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3841
3842         if (scmd && ioc->shost->nr_hw_queues > 1) {
3843                 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3844
3845                 return blk_mq_unique_tag_to_hwq(tag) +
3846                         ioc->high_iops_queues;
3847         }
3848
3849         return ioc->cpu_msix_table[raw_smp_processor_id()];
3850 }
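
/*
 * Note on the blk-mq path above: blk_mq_unique_tag() packs the hardware
 * queue number into the upper 16 bits of the tag
 * ((hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag), so, for example, hwq = 2 and
 * tag = 5 give 0x00020005; blk_mq_unique_tag_to_hwq() recovers the 2,
 * which is then biased by ioc->high_iops_queues.
 */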
3851
3852 /**
3853  * _base_get_high_iops_msix_index - get the msix index of
3854  *                              high iops queues
3855  * @ioc: per adapter object
3856  * @scmd: scsi_cmnd object
3857  *
3858  * Return: msix index of high iops reply queues.
3859  * i.e. high iops reply queue on which IO request's
3860  * reply should be posted by the HBA firmware.
3861  */
3862 static inline u8
3863 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3864         struct scsi_cmnd *scmd)
3865 {
3866         /*
3867          * Round-robin the IO interrupts among the high iops
3868          * reply queues in batches of 16 when the outstanding
3869          * IO count on the target device is >= 8.
3870          */
3871
3872         if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3873                 return base_mod64((
3874                     atomic64_add_return(1, &ioc->high_iops_outstanding) /
3875                     MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3876                     MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3877
3878         return _base_get_msix_index(ioc, scmd);
3879 }
3880
3881 /**
3882  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3883  * @ioc: per adapter object
3884  * @cb_idx: callback index
3885  *
3886  * Return: smid (zero is invalid)
3887  */
3888 u16
3889 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3890 {
3891         unsigned long flags;
3892         struct request_tracker *request;
3893         u16 smid;
3894
3895         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3896         if (list_empty(&ioc->internal_free_list)) {
3897                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3898                 ioc_err(ioc, "%s: smid not available\n", __func__);
3899                 return 0;
3900         }
3901
3902         request = list_entry(ioc->internal_free_list.next,
3903             struct request_tracker, tracker_list);
3904         request->cb_idx = cb_idx;
3905         smid = request->smid;
3906         list_del(&request->tracker_list);
3907         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3908         return smid;
3909 }
3910
3911 /**
3912  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3913  * @ioc: per adapter object
3914  * @cb_idx: callback index
3915  * @scmd: pointer to scsi command object
3916  *
3917  * Return: smid (zero is invalid)
3918  */
3919 u16
3920 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3921         struct scsi_cmnd *scmd)
3922 {
3923         struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3924         u16 smid;
3925         u32 tag, unique_tag;
3926
3927         unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3928         tag = blk_mq_unique_tag_to_tag(unique_tag);
3929
3930         /*
3931          * Store hw queue number corresponding to the tag.
3932          * This hw queue number is used later to determine
3933          * the unique_tag using the logic below. This unique_tag
3934          * is used to retrieve the scmd pointer corresponding
3935          * to tag using scsi_host_find_tag() API.
3936          *
3937          * tag = smid - 1;
3938          * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3939          */
3940         ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
3941
3942         smid = tag + 1;
3943         request->cb_idx = cb_idx;
3944         request->smid = smid;
3945         request->scmd = scmd;
3946         INIT_LIST_HEAD(&request->chain_list);
3947         return smid;
3948 }
3949
3950 /**
3951  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3952  * @ioc: per adapter object
3953  * @cb_idx: callback index
3954  *
3955  * Return: smid (zero is invalid)
3956  */
3957 u16
3958 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3959 {
3960         unsigned long flags;
3961         struct request_tracker *request;
3962         u16 smid;
3963
3964         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3965         if (list_empty(&ioc->hpr_free_list)) {
3966                 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3967                 return 0;
3968         }
3969
3970         request = list_entry(ioc->hpr_free_list.next,
3971             struct request_tracker, tracker_list);
3972         request->cb_idx = cb_idx;
3973         smid = request->smid;
3974         list_del(&request->tracker_list);
3975         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3976         return smid;
3977 }
3978
3979 static void
3980 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3981 {
3982         /*
3983          * See the _wait_for_commands_to_complete() call with regard to this code.
3984          */
3985         if (ioc->shost_recovery && ioc->pending_io_count) {
3986                 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3987                 if (ioc->pending_io_count == 0)
3988                         wake_up(&ioc->reset_wq);
3989         }
3990 }
3991
3992 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3993                            struct scsiio_tracker *st)
3994 {
3995         if (WARN_ON(st->smid == 0))
3996                 return;
3997         st->cb_idx = 0xFF;
3998         st->direct_io = 0;
3999         st->scmd = NULL;
4000         atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
4001         st->smid = 0;
4002 }
4003
4004 /**
4005  * mpt3sas_base_free_smid - put smid back on free_list
4006  * @ioc: per adapter object
4007  * @smid: system request message index
4008  */
4009 void
4010 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4011 {
4012         unsigned long flags;
4013         int i;
4014
4015         if (smid < ioc->hi_priority_smid) {
4016                 struct scsiio_tracker *st;
4017                 void *request;
4018
4019                 st = _get_st_from_smid(ioc, smid);
4020                 if (!st) {
4021                         _base_recovery_check(ioc);
4022                         return;
4023                 }
4024
4025                 /* Clear MPI request frame */
4026                 request = mpt3sas_base_get_msg_frame(ioc, smid);
4027                 memset(request, 0, ioc->request_sz);
4028
4029                 mpt3sas_base_clear_st(ioc, st);
4030                 _base_recovery_check(ioc);
4031                 ioc->io_queue_num[smid - 1] = 0;
4032                 return;
4033         }
4034
4035         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4036         if (smid < ioc->internal_smid) {
4037                 /* hi-priority */
4038                 i = smid - ioc->hi_priority_smid;
4039                 ioc->hpr_lookup[i].cb_idx = 0xFF;
4040                 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
4041         } else if (smid <= ioc->hba_queue_depth) {
4042                 /* internal queue */
4043                 i = smid - ioc->internal_smid;
4044                 ioc->internal_lookup[i].cb_idx = 0xFF;
4045                 list_add(&ioc->internal_lookup[i].tracker_list,
4046                     &ioc->internal_free_list);
4047         }
4048         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4049 }
4050
4051 /**
4052  * _base_mpi_ep_writeq - 64 bit MMIO write issued as two 32 bit writes
4053  * @b: data payload
4054  * @addr: address in MMIO space
4055  * @writeq_lock: spin lock
4056  *
4057  * This is special handling for the MPI endpoint to take care of 32 bit
4058  * environments where it is not guaranteed that the entire word
4059  * is sent in one transfer.
4060  */
4061 static inline void
4062 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
4063                                         spinlock_t *writeq_lock)
4064 {
4065         unsigned long flags;
4066
4067         spin_lock_irqsave(writeq_lock, flags);
4068         __raw_writel((u32)(b), addr);
4069         __raw_writel((u32)(b >> 32), (addr + 4));
4070         spin_unlock_irqrestore(writeq_lock, flags);
4071 }
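
/*
 * The writeq_lock above is what makes the two 32-bit halves appear
 * atomic to the firmware: without it, two CPUs posting descriptors
 * concurrently could interleave their low/high writes, and the IOC
 * would latch a 64-bit descriptor stitched together from two different
 * requests.
 */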
4072
4073 /**
4074  * _base_writeq - 64 bit write to MMIO
4075  * @b: data payload
4076  * @addr: address in MMIO space
4077  * @writeq_lock: spin lock
4078  *
4079  * Glue for handling an atomic 64 bit word write to MMIO. This special handling
4080  * takes care of 32 bit environments where it is not guaranteed that the entire
4081  * word is sent in one transfer.
4082  */
4083 #if defined(writeq) && defined(CONFIG_64BIT)
4084 static inline void
4085 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4086 {
4087         wmb();
4088         __raw_writeq(b, addr);
4089         barrier();
4090 }
4091 #else
4092 static inline void
4093 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4094 {
4095         _base_mpi_ep_writeq(b, addr, writeq_lock);
4096 }
4097 #endif
4098
4099 /**
4100  * _base_set_and_get_msix_index - get the msix index and assign to msix_io
4101  *                                variable of scsi tracker
4102  * @ioc: per adapter object
4103  * @smid: system request message index
4104  *
4105  * Return: msix index.
4106  */
4107 static u8
4108 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4109 {
4110         struct scsiio_tracker *st = NULL;
4111
4112         if (smid < ioc->hi_priority_smid)
4113                 st = _get_st_from_smid(ioc, smid);
4114
4115         if (st == NULL)
4116                 return  _base_get_msix_index(ioc, NULL);
4117
4118         st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
4119         return st->msix_io;
4120 }
4121
4122 /**
4123  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
4124  * @ioc: per adapter object
4125  * @smid: system request message index
4126  * @handle: device handle
4127  */
4128 static void
4129 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
4130         u16 smid, u16 handle)
4131 {
4132         Mpi2RequestDescriptorUnion_t descriptor;
4133         u64 *request = (u64 *)&descriptor;
4134         void *mpi_req_iomem;
4135         __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4136
4137         _clone_sg_entries(ioc, (void *) mfp, smid);
4138         mpi_req_iomem = (void __force *)ioc->chip +
4139                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4140         _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4141                                         ioc->request_sz);
4142         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4143         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4144         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4145         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4146         descriptor.SCSIIO.LMID = 0;
4147         _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4148             &ioc->scsi_lookup_lock);
4149 }
4150
4151 /**
4152  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
4153  * @ioc: per adapter object
4154  * @smid: system request message index
4155  * @handle: device handle
4156  */
4157 static void
4158 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
4159 {
4160         Mpi2RequestDescriptorUnion_t descriptor;
4161         u64 *request = (u64 *)&descriptor;
4162
4163
4164         descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4165         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4166         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4167         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4168         descriptor.SCSIIO.LMID = 0;
4169         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4170             &ioc->scsi_lookup_lock);
4171 }
4172
4173 /**
4174  * _base_put_smid_fast_path - send fast path request to firmware
4175  * @ioc: per adapter object
4176  * @smid: system request message index
4177  * @handle: device handle
4178  */
4179 static void
4180 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4181         u16 handle)
4182 {
4183         Mpi2RequestDescriptorUnion_t descriptor;
4184         u64 *request = (u64 *)&descriptor;
4185
4186         descriptor.SCSIIO.RequestFlags =
4187             MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4188         descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4189         descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4190         descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4191         descriptor.SCSIIO.LMID = 0;
4192         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4193             &ioc->scsi_lookup_lock);
4194 }
4195
4196 /**
4197  * _base_put_smid_hi_priority - send Task Management request to firmware
4198  * @ioc: per adapter object
4199  * @smid: system request message index
4200  * @msix_task: same as the MSI-X index of the IO in case of task abort; else 0
4201  */
4202 static void
4203 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4204         u16 msix_task)
4205 {
4206         Mpi2RequestDescriptorUnion_t descriptor;
4207         void *mpi_req_iomem;
4208         u64 *request;
4209
4210         if (ioc->is_mcpu_endpoint) {
4211                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4212
4213                 /* TBD 256 is offset within sys register. */
4214                 mpi_req_iomem = (void __force *)ioc->chip
4215                                         + MPI_FRAME_START_OFFSET
4216                                         + (smid * ioc->request_sz);
4217                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4218                                                         ioc->request_sz);
4219         }
4220
4221         request = (u64 *)&descriptor;
4222
4223         descriptor.HighPriority.RequestFlags =
4224             MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4225         descriptor.HighPriority.MSIxIndex = msix_task;
4226         descriptor.HighPriority.SMID = cpu_to_le16(smid);
4227         descriptor.HighPriority.LMID = 0;
4228         descriptor.HighPriority.Reserved1 = 0;
4229         if (ioc->is_mcpu_endpoint)
4230                 _base_mpi_ep_writeq(*request,
4231                                 &ioc->chip->RequestDescriptorPostLow,
4232                                 &ioc->scsi_lookup_lock);
4233         else
4234                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4235                     &ioc->scsi_lookup_lock);
4236 }
4237
4238 /**
4239  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
4240  *  firmware
4241  * @ioc: per adapter object
4242  * @smid: system request message index
4243  */
4244 void
4245 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4246 {
4247         Mpi2RequestDescriptorUnion_t descriptor;
4248         u64 *request = (u64 *)&descriptor;
4249
4250         descriptor.Default.RequestFlags =
4251                 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
4252         descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4253         descriptor.Default.SMID = cpu_to_le16(smid);
4254         descriptor.Default.LMID = 0;
4255         descriptor.Default.DescriptorTypeDependent = 0;
4256         _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4257             &ioc->scsi_lookup_lock);
4258 }
4259
4260 /**
4261  * _base_put_smid_default - Default, primarily used for config pages
4262  * @ioc: per adapter object
4263  * @smid: system request message index
4264  */
4265 static void
4266 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4267 {
4268         Mpi2RequestDescriptorUnion_t descriptor;
4269         void *mpi_req_iomem;
4270         u64 *request;
4271
4272         if (ioc->is_mcpu_endpoint) {
4273                 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4274
4275                 _clone_sg_entries(ioc, (void *) mfp, smid);
4276                 /* TBD 256 is offset within sys register */
4277                 mpi_req_iomem = (void __force *)ioc->chip +
4278                         MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4279                 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4280                                                         ioc->request_sz);
4281         }
4282         request = (u64 *)&descriptor;
4283         descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4284         descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4285         descriptor.Default.SMID = cpu_to_le16(smid);
4286         descriptor.Default.LMID = 0;
4287         descriptor.Default.DescriptorTypeDependent = 0;
4288         if (ioc->is_mcpu_endpoint)
4289                 _base_mpi_ep_writeq(*request,
4290                                 &ioc->chip->RequestDescriptorPostLow,
4291                                 &ioc->scsi_lookup_lock);
4292         else
4293                 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4294                                 &ioc->scsi_lookup_lock);
4295 }
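     /*
      * For MPI-endpoint (mCPU) controllers the request frame is first cloned
      * into the host-visible system interface region at
      * MPI_FRAME_START_OFFSET + (smid * request_sz) before the descriptor is
      * posted, so the firmware reads the request from that region rather
      * than from the host message frame.
      */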
4296
4297 /**
4298  * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4299  *   Atomic Request Descriptor
4300  * @ioc: per adapter object
4301  * @smid: system request message index
4302  * @handle: device handle, unused in this function, for function type match
4303  *
4304  * Return: nothing.
4305  */
4306 static void
4307 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4308         u16 handle)
4309 {
4310         Mpi26AtomicRequestDescriptor_t descriptor;
4311         u32 *request = (u32 *)&descriptor;
4312
4313         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4314         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4315         descriptor.SMID = cpu_to_le16(smid);
4316
4317         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4318 }
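     /*
      * An Atomic Request Descriptor is only 32 bits wide (RequestFlags,
      * MSIxIndex, SMID), so it is posted with a single writel(); no lock is
      * needed because a 32-bit MMIO write cannot be torn.
      */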
4319
4320 /**
4321  * _base_put_smid_fast_path_atomic - send fast path request to firmware
4322  * using Atomic Request Descriptor
4323  * @ioc: per adapter object
4324  * @smid: system request message index
4325  * @handle: device handle, unused in this function, for function type match
4326  * Return: nothing
4327  */
4328 static void
4329 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4330         u16 handle)
4331 {
4332         Mpi26AtomicRequestDescriptor_t descriptor;
4333         u32 *request = (u32 *)&descriptor;
4334
4335         descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4336         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4337         descriptor.SMID = cpu_to_le16(smid);
4338
4339         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4340 }
4341
4342 /**
4343  * _base_put_smid_hi_priority_atomic - send Task Management request to
4344  * firmware using Atomic Request Descriptor
4345  * @ioc: per adapter object
4346  * @smid: system request message index
4347  * @msix_task: same as the MSI-X index of the IO in case of task abort; else 0
4348  *
4349  * Return: nothing.
4350  */
4351 static void
4352 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4353         u16 msix_task)
4354 {
4355         Mpi26AtomicRequestDescriptor_t descriptor;
4356         u32 *request = (u32 *)&descriptor;
4357
4358         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4359         descriptor.MSIxIndex = msix_task;
4360         descriptor.SMID = cpu_to_le16(smid);
4361
4362         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4363 }
4364
4365 /**
4366  * _base_put_smid_default_atomic - Default, primarily used for config pages
4367  * use Atomic Request Descriptor
4368  * @ioc: per adapter object
4369  * @smid: system request message index
4370  *
4371  * Return: nothing.
4372  */
4373 static void
4374 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4375 {
4376         Mpi26AtomicRequestDescriptor_t descriptor;
4377         u32 *request = (u32 *)&descriptor;
4378
4379         descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4380         descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4381         descriptor.SMID = cpu_to_le16(smid);
4382
4383         writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4384 }
4385
4386 /**
4387  * _base_display_OEMs_branding - Display branding string
4388  * @ioc: per adapter object
4389  */
4390 static void
4391 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4392 {
4396         switch (ioc->pdev->subsystem_vendor) {
4397         case PCI_VENDOR_ID_INTEL:
4398                 switch (ioc->pdev->device) {
4399                 case MPI2_MFGPAGE_DEVID_SAS2008:
4400                         switch (ioc->pdev->subsystem_device) {
4401                         case MPT2SAS_INTEL_RMS2LL080_SSDID:
4402                                 ioc_info(ioc, "%s\n",
4403                                          MPT2SAS_INTEL_RMS2LL080_BRANDING);
4404                                 break;
4405                         case MPT2SAS_INTEL_RMS2LL040_SSDID:
4406                                 ioc_info(ioc, "%s\n",
4407                                          MPT2SAS_INTEL_RMS2LL040_BRANDING);
4408                                 break;
4409                         case MPT2SAS_INTEL_SSD910_SSDID:
4410                                 ioc_info(ioc, "%s\n",
4411                                          MPT2SAS_INTEL_SSD910_BRANDING);
4412                                 break;
4413                         default:
4414                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4415                                          ioc->pdev->subsystem_device);
4416                                 break;
4417                         }
4418                         break;
4419                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4420                         switch (ioc->pdev->subsystem_device) {
4421                         case MPT2SAS_INTEL_RS25GB008_SSDID:
4422                                 ioc_info(ioc, "%s\n",
4423                                          MPT2SAS_INTEL_RS25GB008_BRANDING);
4424                                 break;
4425                         case MPT2SAS_INTEL_RMS25JB080_SSDID:
4426                                 ioc_info(ioc, "%s\n",
4427                                          MPT2SAS_INTEL_RMS25JB080_BRANDING);
4428                                 break;
4429                         case MPT2SAS_INTEL_RMS25JB040_SSDID:
4430                                 ioc_info(ioc, "%s\n",
4431                                          MPT2SAS_INTEL_RMS25JB040_BRANDING);
4432                                 break;
4433                         case MPT2SAS_INTEL_RMS25KB080_SSDID:
4434                                 ioc_info(ioc, "%s\n",
4435                                          MPT2SAS_INTEL_RMS25KB080_BRANDING);
4436                                 break;
4437                         case MPT2SAS_INTEL_RMS25KB040_SSDID:
4438                                 ioc_info(ioc, "%s\n",
4439                                          MPT2SAS_INTEL_RMS25KB040_BRANDING);
4440                                 break;
4441                         case MPT2SAS_INTEL_RMS25LB040_SSDID:
4442                                 ioc_info(ioc, "%s\n",
4443                                          MPT2SAS_INTEL_RMS25LB040_BRANDING);
4444                                 break;
4445                         case MPT2SAS_INTEL_RMS25LB080_SSDID:
4446                                 ioc_info(ioc, "%s\n",
4447                                          MPT2SAS_INTEL_RMS25LB080_BRANDING);
4448                                 break;
4449                         default:
4450                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4451                                          ioc->pdev->subsystem_device);
4452                                 break;
4453                         }
4454                         break;
4455                 case MPI25_MFGPAGE_DEVID_SAS3008:
4456                         switch (ioc->pdev->subsystem_device) {
4457                         case MPT3SAS_INTEL_RMS3JC080_SSDID:
4458                                 ioc_info(ioc, "%s\n",
4459                                          MPT3SAS_INTEL_RMS3JC080_BRANDING);
4460                                 break;
4461
4462                         case MPT3SAS_INTEL_RS3GC008_SSDID:
4463                                 ioc_info(ioc, "%s\n",
4464                                          MPT3SAS_INTEL_RS3GC008_BRANDING);
4465                                 break;
4466                         case MPT3SAS_INTEL_RS3FC044_SSDID:
4467                                 ioc_info(ioc, "%s\n",
4468                                          MPT3SAS_INTEL_RS3FC044_BRANDING);
4469                                 break;
4470                         case MPT3SAS_INTEL_RS3UC080_SSDID:
4471                                 ioc_info(ioc, "%s\n",
4472                                          MPT3SAS_INTEL_RS3UC080_BRANDING);
4473                                 break;
4474                         default:
4475                                 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4476                                          ioc->pdev->subsystem_device);
4477                                 break;
4478                         }
4479                         break;
4480                 default:
4481                         ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4482                                  ioc->pdev->subsystem_device);
4483                         break;
4484                 }
4485                 break;
4486         case PCI_VENDOR_ID_DELL:
4487                 switch (ioc->pdev->device) {
4488                 case MPI2_MFGPAGE_DEVID_SAS2008:
4489                         switch (ioc->pdev->subsystem_device) {
4490                         case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4491                                 ioc_info(ioc, "%s\n",
4492                                          MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4493                                 break;
4494                         case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4495                                 ioc_info(ioc, "%s\n",
4496                                          MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4497                                 break;
4498                         case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4499                                 ioc_info(ioc, "%s\n",
4500                                          MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4501                                 break;
4502                         case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4503                                 ioc_info(ioc, "%s\n",
4504                                          MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4505                                 break;
4506                         case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4507                                 ioc_info(ioc, "%s\n",
4508                                          MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4509                                 break;
4510                         case MPT2SAS_DELL_PERC_H200_SSDID:
4511                                 ioc_info(ioc, "%s\n",
4512                                          MPT2SAS_DELL_PERC_H200_BRANDING);
4513                                 break;
4514                         case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4515                                 ioc_info(ioc, "%s\n",
4516                                          MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4517                                 break;
4518                         default:
4519                                 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4520                                          ioc->pdev->subsystem_device);
4521                                 break;
4522                         }
4523                         break;
4524                 case MPI25_MFGPAGE_DEVID_SAS3008:
4525                         switch (ioc->pdev->subsystem_device) {
4526                         case MPT3SAS_DELL_12G_HBA_SSDID:
4527                                 ioc_info(ioc, "%s\n",
4528                                          MPT3SAS_DELL_12G_HBA_BRANDING);
4529                                 break;
4530                         default:
4531                                 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4532                                          ioc->pdev->subsystem_device);
4533                                 break;
4534                         }
4535                         break;
4536                 default:
4537                         ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4538                                  ioc->pdev->subsystem_device);
4539                         break;
4540                 }
4541                 break;
4542         case PCI_VENDOR_ID_CISCO:
4543                 switch (ioc->pdev->device) {
4544                 case MPI25_MFGPAGE_DEVID_SAS3008:
4545                         switch (ioc->pdev->subsystem_device) {
4546                         case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4547                                 ioc_info(ioc, "%s\n",
4548                                          MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4549                                 break;
4550                         case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4551                                 ioc_info(ioc, "%s\n",
4552                                          MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4553                                 break;
4554                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4555                                 ioc_info(ioc, "%s\n",
4556                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4557                                 break;
4558                         default:
4559                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4560                                          ioc->pdev->subsystem_device);
4561                                 break;
4562                         }
4563                         break;
4564                 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4565                         switch (ioc->pdev->subsystem_device) {
4566                         case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4567                                 ioc_info(ioc, "%s\n",
4568                                          MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4569                                 break;
4570                         case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4571                                 ioc_info(ioc, "%s\n",
4572                                          MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4573                                 break;
4574                         default:
4575                                 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4576                                          ioc->pdev->subsystem_device);
4577                                 break;
4578                         }
4579                         break;
4580                 default:
4581                         ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4582                                  ioc->pdev->subsystem_device);
4583                         break;
4584                 }
4585                 break;
4586         case MPT2SAS_HP_3PAR_SSVID:
4587                 switch (ioc->pdev->device) {
4588                 case MPI2_MFGPAGE_DEVID_SAS2004:
4589                         switch (ioc->pdev->subsystem_device) {
4590                         case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4591                                 ioc_info(ioc, "%s\n",
4592                                          MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4593                                 break;
4594                         default:
4595                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4596                                          ioc->pdev->subsystem_device);
4597                                 break;
4598                         }
4599                         break;
4600                 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4601                         switch (ioc->pdev->subsystem_device) {
4602                         case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4603                                 ioc_info(ioc, "%s\n",
4604                                          MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4605                                 break;
4606                         case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4607                                 ioc_info(ioc, "%s\n",
4608                                          MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4609                                 break;
4610                         case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4611                                 ioc_info(ioc, "%s\n",
4612                                          MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4613                                 break;
4614                         case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4615                                 ioc_info(ioc, "%s\n",
4616                                          MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4617                                 break;
4618                         default:
4619                                 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4620                                          ioc->pdev->subsystem_device);
4621                                 break;
4622                         }
4623                         break;
4624                 default:
4625                         ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4626                                  ioc->pdev->subsystem_device);
4627                         break;
4628                 }
4629                 break;
4630         default:
4631                 break;
4632         }
4633 }
4634
4635 /**
4636  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4637  *                              version from FW Image Header.
4638  * @ioc: per adapter object
4639  *
4640  * Return: 0 for success, non-zero for failure.
4641  */
4642 static int
4643 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4644 {
4645         Mpi2FWImageHeader_t *fw_img_hdr;
4646         Mpi26ComponentImageHeader_t *cmp_img_hdr;
4647         Mpi25FWUploadRequest_t *mpi_request;
4648         Mpi2FWUploadReply_t mpi_reply;
4649         int r = 0, issue_diag_reset = 0;
4650         u32 package_version = 0;
4651         void *fwpkg_data = NULL;
4652         dma_addr_t fwpkg_data_dma;
4653         u16 smid, ioc_status;
4654         size_t data_length;
4655
4656         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4657
4658         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4659                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4660                 return -EAGAIN;
4661         }
4662
4663         data_length = sizeof(Mpi2FWImageHeader_t);
4664         fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4665                         &fwpkg_data_dma, GFP_KERNEL);
4666         if (!fwpkg_data) {
4667                 ioc_err(ioc,
4668                     "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4669                         __FILE__, __LINE__, __func__);
4670                 return -ENOMEM;
4671         }
4672
4673         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4674         if (!smid) {
4675                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4676                 r = -EAGAIN;
4677                 goto out;
4678         }
4679
4680         ioc->base_cmds.status = MPT3_CMD_PENDING;
4681         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4682         ioc->base_cmds.smid = smid;
4683         memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4684         mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4685         mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4686         mpi_request->ImageSize = cpu_to_le32(data_length);
4687         ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4688                         data_length);
4689         init_completion(&ioc->base_cmds.done);
4690         ioc->put_smid_default(ioc, smid);
4691         /* Wait for 15 seconds */
4692         wait_for_completion_timeout(&ioc->base_cmds.done,
4693                         FW_IMG_HDR_READ_TIMEOUT*HZ);
4694         ioc_info(ioc, "%s: complete\n", __func__);
4695         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4696                 ioc_err(ioc, "%s: timeout\n", __func__);
4697                 _debug_dump_mf(mpi_request,
4698                                 sizeof(Mpi25FWUploadRequest_t)/4);
4699                 issue_diag_reset = 1;
4700         } else {
4701                 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4702                 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4703                         memcpy(&mpi_reply, ioc->base_cmds.reply,
4704                                         sizeof(Mpi2FWUploadReply_t));
4705                         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4706                                                 MPI2_IOCSTATUS_MASK;
4707                         if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4708                                 fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4709                                 if (le32_to_cpu(fw_img_hdr->Signature) ==
4710                                     MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4711                                         cmp_img_hdr =
4712                                             (Mpi26ComponentImageHeader_t *)
4713                                             (fwpkg_data);
4714                                         package_version =
4715                                             le32_to_cpu(
4716                                             cmp_img_hdr->ApplicationSpecific);
4717                                 } else
4718                                         package_version =
4719                                             le32_to_cpu(
4720                                             fw_img_hdr->PackageVersion.Word);
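                                     /*
                                      * Worked example: a package_version of
                                      * 0x0A0B0C0D is logged below as
                                      * "FW Package Ver(10.11.12.13)".
                                      */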
4721                                 if (package_version)
4722                                         ioc_info(ioc,
4723                                         "FW Package Ver(%02d.%02d.%02d.%02d)\n",
4724                                         ((package_version) & 0xFF000000) >> 24,
4725                                         ((package_version) & 0x00FF0000) >> 16,
4726                                         ((package_version) & 0x0000FF00) >> 8,
4727                                         (package_version) & 0x000000FF);
4728                         } else {
4729                                 _debug_dump_mf(&mpi_reply,
4730                                                 sizeof(Mpi2FWUploadReply_t)/4);
4731                         }
4732                 }
4733         }
4734         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4735 out:
4736         if (fwpkg_data)
4737                 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4738                                 fwpkg_data_dma);
4739         if (issue_diag_reset) {
4740                 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
4741                         return -EFAULT;
4742                 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
4743                         return -EFAULT;
4744                 r = -EAGAIN;
4745         }
4746         return r;
4747 }
4748
4749 /**
4750  * _base_display_ioc_capabilities - Display IOC's capabilities.
4751  * @ioc: per adapter object
4752  */
4753 static void
4754 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4755 {
4756         int i = 0;
4757         char desc[17] = {0};
4758         u32 iounit_pg1_flags;
4759
4760         strncpy(desc, ioc->manu_pg0.ChipName, 16);
4761         ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
4762                  desc,
4763                  (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4764                  (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4765                  (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4766                  ioc->facts.FWVersion.Word & 0x000000FF,
4767                  ioc->pdev->revision);
4768
4769         _base_display_OEMs_branding(ioc);
4770
4771         ioc_info(ioc, "Protocol=(");
4772
4773         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4774                 pr_cont("Initiator");
4775                 i++;
4776         }
4777
4778         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4779                 pr_cont("%sTarget", i ? "," : "");
4780                 i++;
4781         }
4782
4783         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4784                 pr_cont("%sNVMe", i ? "," : "");
4785                 i++;
4786         }
4787
4788         i = 0;
4789         pr_cont("), Capabilities=(");
4790
4791         if (!ioc->hide_ir_msg) {
4792                 if (ioc->facts.IOCCapabilities &
4793                     MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4794                         pr_cont("Raid");
4795                         i++;
4796                 }
4797         }
4798
4799         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4800                 pr_cont("%sTLR", i ? "," : "");
4801                 i++;
4802         }
4803
4804         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4805                 pr_cont("%sMulticast", i ? "," : "");
4806                 i++;
4807         }
4808
4809         if (ioc->facts.IOCCapabilities &
4810             MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4811                 pr_cont("%sBIDI Target", i ? "," : "");
4812                 i++;
4813         }
4814
4815         if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4816                 pr_cont("%sEEDP", i ? "," : "");
4817                 i++;
4818         }
4819
4820         if (ioc->facts.IOCCapabilities &
4821             MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4822                 pr_cont("%sSnapshot Buffer", i ? "," : "");
4823                 i++;
4824         }
4825
4826         if (ioc->facts.IOCCapabilities &
4827             MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4828                 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4829                 i++;
4830         }
4831
4832         if (ioc->facts.IOCCapabilities &
4833             MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4834                 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4835                 i++;
4836         }
4837
4838         if (ioc->facts.IOCCapabilities &
4839             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4840                 pr_cont("%sTask Set Full", i ? "," : "");
4841                 i++;
4842         }
4843
4844         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4845         if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4846                 pr_cont("%sNCQ", i ? "," : "");
4847                 i++;
4848         }
4849
4850         pr_cont(")\n");
4851 }
4852
4853 /**
4854  * mpt3sas_base_update_missing_delay - change the missing delay timers
4855  * @ioc: per adapter object
4856  * @device_missing_delay: amount of time till device is reported missing
4857  * @io_missing_delay: interval IO is returned when there is a missing device
4858  *
4859  * Using values passed on the command line, this function modifies the device
4860  * missing delay, as well as the IO missing delay. It should be called at
4861  * driver load time.
4862  */
4863 void
4864 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4865         u16 device_missing_delay, u8 io_missing_delay)
4866 {
4867         u16 dmd, dmd_new, dmd_original;
4868         u8 io_missing_delay_original;
4869         u16 sz;
4870         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4871         Mpi2ConfigReply_t mpi_reply;
4872         u8 num_phys = 0;
4873         u16 ioc_status;
4874
4875         mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4876         if (!num_phys)
4877                 return;
4878
4879         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4880             sizeof(Mpi2SasIOUnit1PhyData_t));
4881         sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4882         if (!sas_iounit_pg1) {
4883                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4884                         __FILE__, __LINE__, __func__);
4885                 goto out;
4886         }
4887         if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4888             sas_iounit_pg1, sz))) {
4889                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4890                         __FILE__, __LINE__, __func__);
4891                 goto out;
4892         }
4893         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4894             MPI2_IOCSTATUS_MASK;
4895         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4896                 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4897                         __FILE__, __LINE__, __func__);
4898                 goto out;
4899         }
4900
4901         /* device missing delay */
4902         dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4903         if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4904                 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4905         else
4906                 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4907         dmd_original = dmd;
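             /*
              * Worked example: device_missing_delay = 300 exceeds 0x7F, so it
              * is stored as 300 / 16 = 18 with the UNIT_16 flag set, giving
              * an effective delay of 18 * 16 = 288 seconds.
              */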
4908         if (device_missing_delay > 0x7F) {
4909                 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4910                     device_missing_delay;
4911                 dmd = dmd / 16;
4912                 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4913         } else
4914                 dmd = device_missing_delay;
4915         sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4916
4917         /* io missing delay */
4918         io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4919         sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4920
4921         if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4922             sz)) {
4923                 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4924                         dmd_new = (dmd &
4925                             MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4926                 else
4927                         dmd_new =
4928                             dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4929                 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4930                          dmd_original, dmd_new);
4931                 ioc_info(ioc, "io_missing_delay: old(%d), new(%d)\n",
4932                          io_missing_delay_original,
4933                          io_missing_delay);
4934                 ioc->device_missing_delay = dmd_new;
4935                 ioc->io_missing_delay = io_missing_delay;
4936         }
4937
4938 out:
4939         kfree(sas_iounit_pg1);
4940 }
4941
4942 /**
4943  * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4944  *    according to performance mode.
4945  * @ioc : per adapter object
4946  *
4947  * Return: zero on success; otherwise return an EAGAIN error code asking the
4948  * caller to retry.
4949  */
4950 static int
4951 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4952 {
4953         Mpi2IOCPage1_t ioc_pg1;
4954         Mpi2ConfigReply_t mpi_reply;
4955         int rc;
4956
4957         rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4958         if (rc)
4959                 return rc;
4960         memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4961
4962         switch (perf_mode) {
4963         case MPT_PERF_MODE_DEFAULT:
4964         case MPT_PERF_MODE_BALANCED:
4965                 if (ioc->high_iops_queues) {
4966                         ioc_info(ioc,
4967                                 "Enable interrupt coalescing only for first %d reply queues\n",
4969                                 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4970                         /*
4971                          * If the 31st bit is zero then interrupt coalescing is
4972                          * enabled for all reply descriptor post queues.
4973                          * If the 31st bit is set to one then the user can
4974                          * enable/disable interrupt coalescing on a per reply
4975                          * descriptor post queue group (of 8) basis. So to enable
4976                          * interrupt coalescing only on the first reply descriptor
4977                          * post queue group, the 31st and zeroth bits are set.
4978                          */
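                             /*
                              * For example, if MPT3SAS_HIGH_IOPS_REPLY_QUEUES
                              * is 8, this evaluates to
                              * 0x80000000 | ((1 << 1) - 1) = 0x80000001.
                              */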
4979                         ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4980                             ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4981                         rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4982                         if (rc)
4983                                 return rc;
4984                         ioc_info(ioc, "performance mode: balanced\n");
4985                         return 0;
4986                 }
4987                 fallthrough;
4988         case MPT_PERF_MODE_LATENCY:
4989                 /*
4990                  * Enable interrupt coalescing on all reply queues
4991                  * with timeout value 0xA
4992                  */
4993                 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4994                 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4995                 ioc_pg1.ProductSpecific = 0;
4996                 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4997                 if (rc)
4998                         return rc;
4999                 ioc_info(ioc, "performance mode: latency\n");
5000                 break;
5001         case MPT_PERF_MODE_IOPS:
5002                 /*
5003                  * Enable interrupt coalescing on all reply queues.
5004                  */
5005                 ioc_info(ioc,
5006                     "performance mode: iops with coalescing timeout: 0x%x\n",
5007                     le32_to_cpu(ioc_pg1.CoalescingTimeout));
5008                 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
5009                 ioc_pg1.ProductSpecific = 0;
5010                 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5011                 if (rc)
5012                         return rc;
5013                 break;
5014         }
5015         return 0;
5016 }
5017
5018 /**
5019  * _base_get_event_diag_triggers - get event diag trigger values from
5020  *                              persistent pages
5021  * @ioc : per adapter object
5022  *
5023  * Return: 0 on success; otherwise return failure status.
5024  */
5025 static int
5026 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5027 {
5028         Mpi26DriverTriggerPage2_t trigger_pg2;
5029         struct SL_WH_EVENT_TRIGGER_T *event_tg;
5030         MPI26_DRIVER_MPI_EVENT_TIGGER_ENTRY *mpi_event_tg;
5031         Mpi2ConfigReply_t mpi_reply;
5032         int r = 0, i = 0;
5033         u16 count = 0;
5034         u16 ioc_status;
5035
5036         r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
5037             &trigger_pg2);
5038         if (r)
5039                 return r;
5040
5041         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5042             MPI2_IOCSTATUS_MASK;
5043         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5044                 dinitprintk(ioc,
5045                     ioc_err(ioc,
5046                     "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
5047                    __func__, ioc_status));
5048                 return 0;
5049         }
5050
5051         if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
5052                 count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
5053                 count = min_t(u16, NUM_VALID_ENTRIES, count);
5054                 ioc->diag_trigger_event.ValidEntries = count;
5055
5056                 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
5057                 mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
5058                 for (i = 0; i < count; i++) {
5059                         event_tg->EventValue = le16_to_cpu(
5060                             mpi_event_tg->MPIEventCode);
5061                         event_tg->LogEntryQualifier = le16_to_cpu(
5062                             mpi_event_tg->MPIEventCodeSpecific);
5063                         event_tg++;
5064                         mpi_event_tg++;
5065                 }
5066         }
5067         return 0;
5068 }
5069
5070 /**
5071  * _base_get_scsi_diag_triggers - get scsi diag trigger values from
5072  *                              persistent pages
5073  * @ioc : per adapter object
5074  *
5075  * Return: 0 on success; otherwise return failure status.
5076  */
5077 static int
5078 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5079 {
5080         Mpi26DriverTriggerPage3_t trigger_pg3;
5081         struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
5082         MPI26_DRIVER_SCSI_SENSE_TIGGER_ENTRY *mpi_scsi_tg;
5083         Mpi2ConfigReply_t mpi_reply;
5084         int r = 0, i = 0;
5085         u16 count = 0;
5086         u16 ioc_status;
5087
5088         r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
5089             &trigger_pg3);
5090         if (r)
5091                 return r;
5092
5093         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5094             MPI2_IOCSTATUS_MASK;
5095         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5096                 dinitprintk(ioc,
5097                     ioc_err(ioc,
5098                     "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
5099                     __func__, ioc_status));
5100                 return 0;
5101         }
5102
5103         if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
5104                 count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
5105                 count = min_t(u16, NUM_VALID_ENTRIES, count);
5106                 ioc->diag_trigger_scsi.ValidEntries = count;
5107
5108                 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
5109                 mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
5110                 for (i = 0; i < count; i++) {
5111                         scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
5112                         scsi_tg->ASC = mpi_scsi_tg->ASC;
5113                         scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
5114
5115                         scsi_tg++;
5116                         mpi_scsi_tg++;
5117                 }
5118         }
5119         return 0;
5120 }
5121
5122 /**
5123  * _base_get_mpi_diag_triggers - get mpi diag trigger values from
5124  *                              persistent pages
5125  * @ioc : per adapter object
5126  *
5127  * Return: 0 on success; otherwise return failure status.
5128  */
5129 static int
5130 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5131 {
5132         Mpi26DriverTriggerPage4_t trigger_pg4;
5133         struct SL_WH_MPI_TRIGGER_T *status_tg;
5134         MPI26_DRIVER_IOCSTATUS_LOGINFO_TIGGER_ENTRY *mpi_status_tg;
5135         Mpi2ConfigReply_t mpi_reply;
5136         int r = 0, i = 0;
5137         u16 count = 0;
5138         u16 ioc_status;
5139
5140         r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
5141             &trigger_pg4);
5142         if (r)
5143                 return r;
5144
5145         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5146             MPI2_IOCSTATUS_MASK;
5147         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5148                 dinitprintk(ioc,
5149                     ioc_err(ioc,
5150                     "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
5151                     __func__, ioc_status));
5152                 return 0;
5153         }
5154
5155         if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
5156                 count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
5157                 count = min_t(u16, NUM_VALID_ENTRIES, count);
5158                 ioc->diag_trigger_mpi.ValidEntries = count;
5159
5160                 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
5161                 mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
5162
5163                 for (i = 0; i < count; i++) {
5164                         status_tg->IOCStatus = le16_to_cpu(
5165                             mpi_status_tg->IOCStatus);
5166                         status_tg->IocLogInfo = le32_to_cpu(
5167                             mpi_status_tg->LogInfo);
5168
5169                         status_tg++;
5170                         mpi_status_tg++;
5171                 }
5172         }
5173         return 0;
5174 }
5175
5176 /**
5177  * _base_get_master_diag_triggers - get master diag trigger values from
5178  *                              persistent pages
5179  * @ioc : per adapter object
5180  *
5181  * Return: 0 on success; otherwise return failure status.
5182  */
5183 static int
5184 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5185 {
5186         Mpi26DriverTriggerPage1_t trigger_pg1;
5187         Mpi2ConfigReply_t mpi_reply;
5188         int r;
5189         u16 ioc_status;
5190
5191         r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5192             &trigger_pg1);
5193         if (r)
5194                 return r;
5195
5196         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5197             MPI2_IOCSTATUS_MASK;
5198         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5199                 dinitprintk(ioc,
5200                     ioc_err(ioc,
5201                     "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
5202                    __func__, ioc_status));
5203                 return 0;
5204         }
5205
5206         if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
5207                 ioc->diag_trigger_master.MasterData |=
5208                     le32_to_cpu(
5209                     trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5210         return 0;
5211 }
5212
5213 /**
5214  * _base_check_for_trigger_pages_support - checks whether HBA FW supports
5215  *                                      driver trigger pages or not
5216  * @ioc : per adapter object
5217  * @trigger_flags : address where trigger page0's TriggerFlags value is copied
5218  *
5219  * Return: 0 on success, with trigger page0's TriggerFlags value copied to
5220  * @trigger_flags; %-EFAULT if driver trigger pages are not supported by the
5221  * FW; or %-EAGAIN if a diag reset occurred due to a FW fault, asking the
5222  * caller to retry the command.
5223  *
5224  */
5225 static int
5226 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5227 {
5228         Mpi26DriverTriggerPage0_t trigger_pg0;
5229         int r = 0;
5230         Mpi2ConfigReply_t mpi_reply;
5231         u16 ioc_status;
5232
5233         r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5234             &trigger_pg0);
5235         if (r)
5236                 return r;
5237
5238         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5239             MPI2_IOCSTATUS_MASK;
5240         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5241                 return -EFAULT;
5242
5243         *trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
5244         return 0;
5245 }
5246
5247 /**
5248  * _base_get_diag_triggers - Retrieve diag trigger values from
5249  *                              persistent pages.
5250  * @ioc : per adapter object
5251  *
5252  * Return: zero on success; otherwise return an EAGAIN error code
5253  * asking the caller to retry.
5254  */
5255 static int
5256 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5257 {
5258         u32 trigger_flags;
5259         int r;
5260
5261         /*
5262          * Default setting of master trigger.
5263          */
5264         ioc->diag_trigger_master.MasterData =
5265             (MASTER_TRIGGER_FW_FAULT | MASTER_TRIGGER_ADAPTER_RESET);
5266
5267         r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5268         if (r) {
5269                 if (r == -EAGAIN)
5270                         return r;
5271                 /*
5272                  * Don't go for error handling when FW doesn't support
5273                  * driver trigger pages.
5274                  */
5275                 return 0;
5276         }
5277
5278         ioc->supports_trigger_pages = 1;
5279
5280         /*
5281          * Retrieve master diag trigger values from driver trigger pg1
5282          * if master trigger bit enabled in TriggerFlags.
5283          */
5284         if ((u16)trigger_flags &
5285             MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
5286                 r = _base_get_master_diag_triggers(ioc);
5287                 if (r)
5288                         return r;
5289         }
5290
5291         /*
5292          * Retrieve event diag trigger values from driver trigger pg2
5293          * if event trigger bit enabled in TriggerFlags.
5294          */
5295         if ((u16)trigger_flags &
5296             MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
5297                 r = _base_get_event_diag_triggers(ioc);
5298                 if (r)
5299                         return r;
5300         }
5301
5302         /*
5303          * Retrieve scsi diag trigger values from driver trigger pg3
5304          * if scsi trigger bit enabled in TriggerFlags.
5305          */
5306         if ((u16)trigger_flags &
5307             MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
5308                 r = _base_get_scsi_diag_triggers(ioc);
5309                 if (r)
5310                         return r;
5311         }
5312         /*
5313          * Retrieve mpi error diag trigger values from driver trigger pg4
5314          * if loginfo trigger bit enabled in TriggerFlags.
5315          */
5316         if ((u16)trigger_flags &
5317             MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
5318                 r = _base_get_mpi_diag_triggers(ioc);
5319                 if (r)
5320                         return r;
5321         }
5322         return 0;
5323 }
5324
5325 /**
5326  * _base_update_diag_trigger_pages - Update the driver trigger pages after
5327  *                      online FW update, in case updated FW supports driver
5328  *                      trigger pages.
5329  * @ioc : per adapter object
5330  *
5331  * Return: nothing.
5332  */
5333 static void
5334 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5335 {
5336
5337         if (ioc->diag_trigger_master.MasterData)
5338                 mpt3sas_config_update_driver_trigger_pg1(ioc,
5339                     &ioc->diag_trigger_master, 1);
5340
5341         if (ioc->diag_trigger_event.ValidEntries)
5342                 mpt3sas_config_update_driver_trigger_pg2(ioc,
5343                     &ioc->diag_trigger_event, 1);
5344
5345         if (ioc->diag_trigger_scsi.ValidEntries)
5346                 mpt3sas_config_update_driver_trigger_pg3(ioc,
5347                     &ioc->diag_trigger_scsi, 1);
5348
5349         if (ioc->diag_trigger_mpi.ValidEntries)
5350                 mpt3sas_config_update_driver_trigger_pg4(ioc,
5351                     &ioc->diag_trigger_mpi, 1);
5352 }
5353
5354 /**
5355  * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices,
5356  *                               falling back to default QD values on failure.
5357  * @ioc : per adapter object
5358  *
5359  * Return: 0 for success, non-zero for failure.
5360  *
5361  */
5362 static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
5363 {
5364         Mpi2ConfigReply_t mpi_reply;
5365         Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5366         Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
5367         u16 depth;
5368         int sz;
5369         int rc = 0;
5370
5371         ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5372         ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5373         ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
5374         ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
5375         if (!ioc->is_gen35_ioc)
5376                 goto out;
5377         /* sas iounit page 1 */
5378         sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
5379         sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
5380         if (!sas_iounit_pg1) {
5381                 pr_err("%s: failure at %s:%d/%s()!\n",
5382                     ioc->name, __FILE__, __LINE__, __func__);
5383                 return -ENOMEM;
5384         }
5385         rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5386             sas_iounit_pg1, sz);
5387         if (rc) {
5388                 pr_err("%s: failure at %s:%d/%s()!\n",
5389                     ioc->name, __FILE__, __LINE__, __func__);
5390                 goto out;
5391         }
5392
5393         depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
5394         ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5395
5396         depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
5397         ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5398
5399         depth = sas_iounit_pg1->SATAMaxQDepth;
5400         ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
5401
5402         /* pcie iounit page 1 */
5403         rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
5404             &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
5405         if (rc) {
5406                 pr_err("%s: failure at %s:%d/%s()!\n",
5407                     ioc->name, __FILE__, __LINE__, __func__);
5408                 goto out;
5409         }
5410         ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
5411             (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
5412             MPT3SAS_NVME_QUEUE_DEPTH;
5413 out:
5414         dinitprintk(ioc, pr_err(
5415             "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
5416             ioc->max_wideport_qd, ioc->max_narrowport_qd,
5417             ioc->max_sata_qd, ioc->max_nvme_qd));
5418         kfree(sas_iounit_pg1);
5419         return rc;
5420 }
5421
5422 /**
5423  * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
5424  *
5425  * @ioc : per adapter object
5426  * @n   : ptr to the ATTO nvram structure
5427  * Return: 0 for success, non-zero for failure.
5428  */
5429 static int
5430 mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
5431                             struct ATTO_SAS_NVRAM *n)
5432 {
5433         int r = -EINVAL;
5434         union ATTO_SAS_ADDRESS *s1;
5435         u32 len;
5436         u8 *pb;
5437         u8 ckSum;
5438
5439         /* validate nvram checksum */
5440         pb = (u8 *) n;
5441         ckSum = ATTO_SASNVR_CKSUM_SEED;
5442         len = sizeof(struct ATTO_SAS_NVRAM);
5443
5444         while (len--)
5445                 ckSum = ckSum + pb[len];
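             /*
              * The checksum byte in the NVRAM is chosen so that the seed plus
              * the sum of all bytes wraps to 0 modulo 256; any non-zero
              * result here therefore indicates corruption.
              */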
5446
5447         if (ckSum) {
5448                 ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
5449                 return r;
5450         }
5451
5452         s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
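             /*
              * The check below expects an NAA 5 address whose leading bytes
              * 0x50 0x01 0x08 0x6X appear to encode the ATTO OUI (00:10:86),
              * followed by a non-zero device-specific portion.
              */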
5453
5454         if (n->Signature[0] != 'E'
5455         || n->Signature[1] != 'S'
5456         || n->Signature[2] != 'A'
5457         || n->Signature[3] != 'S')
5458                 ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
5459         else if (n->Version > ATTO_SASNVR_VERSION)
5460                 ioc_info(ioc, "Invalid ATTO NVRAM version\n");
5461         else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
5462                         || s1->b[0] != 0x50
5463                         || s1->b[1] != 0x01
5464                         || s1->b[2] != 0x08
5465                         || (s1->b[3] & 0xF0) != 0x60
5466                         || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
5467                 ioc_err(ioc, "Invalid ATTO SAS address\n");
5468         } else
5469                 r = 0;
5470         return r;
5471 }
5472
5473 /**
5474  * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
5475  *
5476  * @ioc : per adapter object
5477  * @sas_addr : return sas address
5478  * Return: 0 for success, non-zero for failure.
5479  */
5480 static int
5481 mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
5482 {
5483         Mpi2ManufacturingPage1_t mfg_pg1;
5484         Mpi2ConfigReply_t mpi_reply;
5485         struct ATTO_SAS_NVRAM *nvram;
5486         int r;
5487         __be64 addr;
5488
5489         r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
5490         if (r) {
5491                 ioc_err(ioc, "Failed to read manufacturing page 1\n");
5492                 return r;
5493         }
5494
5495         /* validate nvram */
5496         nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
5497         r = mpt3sas_atto_validate_nvram(ioc, nvram);
5498         if (r)
5499                 return r;
5500
5501         addr = *((__be64 *) nvram->SasAddr);
5502         sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
5503         return r;
5504 }
5505
5506 /**
5507  * mpt3sas_atto_init - perform initialization for ATTO branded
5508  *                     adapter.
5509  * @ioc: per adapter object
5510  *
5511  * Return: 0 for success, non-zero for failure.
5512  */
5513 static int
5514 mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
5515 {
5516         int sz = 0;
5517         Mpi2BiosPage4_t *bios_pg4 = NULL;
5518         Mpi2ConfigReply_t mpi_reply;
5519         int r;
5520         int ix;
5521         union ATTO_SAS_ADDRESS sas_addr;
5522         union ATTO_SAS_ADDRESS temp;
5523         union ATTO_SAS_ADDRESS bias;
5524
5525         r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
5526         if (r)
5527                 return r;
5528
5529         /* get header first to get size */
5530         r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
5531         if (r) {
5532                 ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
5533                 return r;
5534         }
5535
5536         sz = mpi_reply.Header.PageLength * sizeof(u32);
5537         bios_pg4 = kzalloc(sz, GFP_KERNEL);
5538         if (!bios_pg4) {
5539                 ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
5540                 return -ENOMEM;
5541         }
5542
5543         /* read bios page 4 */
5544         r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5545         if (r) {
5546                 ioc_err(ioc, "Failed to read ATTO bios page 4\n");
5547                 goto out;
5548         }
5549
5550         /* Update bios page 4 with the ATTO WWID */
5551         bias.q = sas_addr.q;
5552         bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
5553
5554         for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
5555                 temp.q = sas_addr.q;
5556                 temp.b[7] += ix;
5557                 bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
5558                 bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
5559         }
5560         r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5561
5562 out:
5563         kfree(bios_pg4);
5564         return r;
5565 }
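
/*
 * Worked example (illustrative values): temp.b[7] += ix bumps one
 * address byte per phy, so a 4-phy ATTO adapter ends up with four
 * consecutive per-phy reassignment WWIDs derived from the base NVRAM
 * SAS address, while every phy shares a single reassignment device name
 * offset from that base by ATTO_SAS_ADDR_DEVNAME_BIAS in the same byte.
 */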
5566
5567 /**
5568  * _base_static_config_pages - static start of day config pages
5569  * @ioc: per adapter object
5570  * Return: 0 for success, non-zero for failure.
 */
5571 static int
5572 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
5573 {
5574         Mpi2ConfigReply_t mpi_reply;
5575         u32 iounit_pg1_flags;
5576         int tg_flags = 0;
5577         int rc;
5578         ioc->nvme_abort_timeout = 30;
5579
5580         rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
5581             &ioc->manu_pg0);
5582         if (rc)
5583                 return rc;
5584         if (ioc->ir_firmware) {
5585                 rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
5586                     &ioc->manu_pg10);
5587                 if (rc)
5588                         return rc;
5589         }
5590
5591         if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
5592                 rc = mpt3sas_atto_init(ioc);
5593                 if (rc)
5594                         return rc;
5595         }
5596
5597         /*
5598          * Ensure correct T10 PI operation if vendor left EEDPTagMode
5599          * flag unset in NVDATA.
5600          */
5601         rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
5602             &ioc->manu_pg11);
5603         if (rc)
5604                 return rc;
5605         if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
5606                 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
5607                     ioc->name);
5608                 ioc->manu_pg11.EEDPTagMode &= ~0x3;
5609                 ioc->manu_pg11.EEDPTagMode |= 0x1;
5610                 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
5611                     &ioc->manu_pg11);
5612         }
5613         if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
5614                 ioc->tm_custom_handling = 1;
5615         else {
5616                 ioc->tm_custom_handling = 0;
5617                 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
5618                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
5619                 else if (ioc->manu_pg11.NVMeAbortTO >
5620                                         NVME_TASK_ABORT_MAX_TIMEOUT)
5621                         ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
5622                 else
5623                         ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
5624         }
5625         ioc->time_sync_interval =
5626             ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
5627         if (ioc->time_sync_interval) {
5628                 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
5629                         ioc->time_sync_interval =
5630                             ioc->time_sync_interval * SECONDS_PER_HOUR;
5631                 else
5632                         ioc->time_sync_interval =
5633                             ioc->time_sync_interval * SECONDS_PER_MIN;
5634                 dinitprintk(ioc, ioc_info(ioc,
5635                     "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
5636                     ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
5637                     MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
5638         } else {
5639                 if (ioc->is_gen35_ioc)
5640                         ioc_warn(ioc,
5641                             "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
5642         }
5643         rc = _base_assign_fw_reported_qd(ioc);
5644         if (rc)
5645                 return rc;
5646
5647         /*
5648          * ATTO doesn't use bios pages 2 and 3 for bios settings.
5649          */
5650         if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
5651                 ioc->bios_pg3.BiosVersion = 0;
5652         else {
5653                 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
5654                 if (rc)
5655                         return rc;
5656                 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
5657                 if (rc)
5658                         return rc;
5659         }
5660
5661         rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
5662         if (rc)
5663                 return rc;
5664         rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
5665         if (rc)
5666                 return rc;
5667         rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5668         if (rc)
5669                 return rc;
5670         rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
5671         if (rc)
5672                 return rc;
5673         _base_display_ioc_capabilities(ioc);
5674
5675         /*
5676          * Enable task_set_full handling in iounit_pg1 when the
5677          * facts capabilities indicate that it's supported.
5678          */
5679         iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
5680         if ((ioc->facts.IOCCapabilities &
5681             MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
5682                 iounit_pg1_flags &=
5683                     ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5684         else
5685                 iounit_pg1_flags |=
5686                     MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5687         ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
5688         rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5689         if (rc)
5690                 return rc;
5691
5692         if (ioc->iounit_pg8.NumSensors)
5693                 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
5694         if (ioc->is_aero_ioc) {
5695                 rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
5696                 if (rc)
5697                         return rc;
5698         }
5699         if (ioc->is_gen35_ioc) {
5700                 if (ioc->is_driver_loading) {
5701                         rc = _base_get_diag_triggers(ioc);
5702                         if (rc)
5703                                 return rc;
5704                 } else {
5705                         /*
5706                          * In case of online HBA FW update operation,
5707                          * check whether updated FW supports the driver trigger
5708                          * pages or not.
5709                          * - If previous FW has not supported driver trigger
5710                          *   pages and newer FW supports them then update these
5711                          *   pages with current diag trigger values.
5712                          * - If previous FW has supported driver trigger pages
5713                          *   and new FW doesn't support them then disable
5714                          *   support_trigger_pages flag.
5715                          */
5716                         _base_check_for_trigger_pages_support(ioc, &tg_flags);
5717                         if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
5718                                 _base_update_diag_trigger_pages(ioc);
5719                         else if (ioc->supports_trigger_pages &&
5720                             tg_flags == -EFAULT)
5721                                 ioc->supports_trigger_pages = 0;
5722                 }
5723         }
5724         return 0;
5725 }
5726
5727 /**
5728  * mpt3sas_free_enclosure_list - release memory
5729  * @ioc: per adapter object
5730  *
5731  * Free memory allocated during enclosure add.
5732  */
5733 void
5734 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5735 {
5736         struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5737
5738         /* Free enclosure list */
5739         list_for_each_entry_safe(enclosure_dev,
5740                         enclosure_dev_next, &ioc->enclosure_list, list) {
5741                 list_del(&enclosure_dev->list);
5742                 kfree(enclosure_dev);
5743         }
5744 }
5745
5746 /**
5747  * _base_release_memory_pools - release memory
5748  * @ioc: per adapter object
5749  *
5750  * Free memory allocated from _base_allocate_memory_pools.
5751  */
5752 static void
5753 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5754 {
5755         int i = 0;
5756         int j = 0;
5757         int dma_alloc_count = 0;
5758         struct chain_tracker *ct;
5759         int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5760
5761         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5762
5763         if (ioc->request) {
5764                 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
5765                     ioc->request,  ioc->request_dma);
5766                 dexitprintk(ioc,
5767                             ioc_info(ioc, "request_pool(0x%p): free\n",
5768                                      ioc->request));
5769                 ioc->request = NULL;
5770         }
5771
5772         if (ioc->sense) {
5773                 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5774                 dma_pool_destroy(ioc->sense_dma_pool);
5775                 dexitprintk(ioc,
5776                             ioc_info(ioc, "sense_pool(0x%p): free\n",
5777                                      ioc->sense));
5778                 ioc->sense = NULL;
5779         }
5780
5781         if (ioc->reply) {
5782                 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
5783                 dma_pool_destroy(ioc->reply_dma_pool);
5784                 dexitprintk(ioc,
5785                             ioc_info(ioc, "reply_pool(0x%p): free\n",
5786                                      ioc->reply));
5787                 ioc->reply = NULL;
5788         }
5789
5790         if (ioc->reply_free) {
5791                 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
5792                     ioc->reply_free_dma);
5793                 dma_pool_destroy(ioc->reply_free_dma_pool);
5794                 dexitprintk(ioc,
5795                             ioc_info(ioc, "reply_free_pool(0x%p): free\n",
5796                                      ioc->reply_free));
5797                 ioc->reply_free = NULL;
5798         }
5799
5800         if (ioc->reply_post) {
5801                 dma_alloc_count = DIV_ROUND_UP(count,
5802                                 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
5803                 for (i = 0; i < count; i++) {
5804                         if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
5805                             && dma_alloc_count) {
5806                                 if (ioc->reply_post[i].reply_post_free) {
5807                                         dma_pool_free(
5808                                             ioc->reply_post_free_dma_pool,
5809                                             ioc->reply_post[i].reply_post_free,
5810                                         ioc->reply_post[i].reply_post_free_dma);
5811                                         dexitprintk(ioc, ioc_info(ioc,
5812                                            "reply_post_free_pool(0x%p): free\n",
5813                                            ioc->reply_post[i].reply_post_free));
5814                                         ioc->reply_post[i].reply_post_free =
5815                                                                         NULL;
5816                                 }
5817                                 --dma_alloc_count;
5818                         }
5819                 }
5820                 dma_pool_destroy(ioc->reply_post_free_dma_pool);
5821                 if (ioc->reply_post_free_array &&
5822                         ioc->rdpq_array_enable) {
5823                         dma_pool_free(ioc->reply_post_free_array_dma_pool,
5824                             ioc->reply_post_free_array,
5825                             ioc->reply_post_free_array_dma);
5826                         ioc->reply_post_free_array = NULL;
5827                 }
5828                 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
5829                 kfree(ioc->reply_post);
5830         }
5831
5832         if (ioc->pcie_sgl_dma_pool) {
5833                 for (i = 0; i < ioc->scsiio_depth; i++) {
5834                         dma_pool_free(ioc->pcie_sgl_dma_pool,
5835                                         ioc->pcie_sg_lookup[i].pcie_sgl,
5836                                         ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5837                         ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
5838                 }
5839                 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
5840         }
5841         kfree(ioc->pcie_sg_lookup);
5842         ioc->pcie_sg_lookup = NULL;
5843
5844         if (ioc->config_page) {
5845                 dexitprintk(ioc,
5846                             ioc_info(ioc, "config_page(0x%p): free\n",
5847                                      ioc->config_page));
5848                 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
5849                     ioc->config_page, ioc->config_page_dma);
5850         }
5851
5852         kfree(ioc->hpr_lookup);
5853         ioc->hpr_lookup = NULL;
5854         kfree(ioc->internal_lookup);
5855         ioc->internal_lookup = NULL;
5856         if (ioc->chain_lookup) {
5857                 for (i = 0; i < ioc->scsiio_depth; i++) {
5858                         for (j = ioc->chains_per_prp_buffer;
5859                             j < ioc->chains_needed_per_io; j++) {
5860                                 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5861                                 if (ct && ct->chain_buffer)
5862                                         dma_pool_free(ioc->chain_dma_pool,
5863                                                 ct->chain_buffer,
5864                                                 ct->chain_buffer_dma);
5865                         }
5866                         kfree(ioc->chain_lookup[i].chains_per_smid);
5867                 }
5868                 dma_pool_destroy(ioc->chain_dma_pool);
5869                 kfree(ioc->chain_lookup);
5870                 ioc->chain_lookup = NULL;
5871         }
5872
5873         kfree(ioc->io_queue_num);
5874         ioc->io_queue_num = NULL;
5875 }
5876
5877 /**
5878  * mpt3sas_check_same_4gb_region - check that a DMA pool does not cross a
5879  *      4GB boundary, i.e. that its start and end addresses share the same
5880  *      upper 32 bits.
5881  * @start_address: base DMA address of the pool
5882  * @pool_sz: size of the pool in bytes
5883  *
5884  * Return: 1 if the whole pool lies within a single 4GB region, else 0.
5885  */
5886 static int
5887 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
5888 {
5889         dma_addr_t end_address;
5890
5891         end_address = start_address + pool_sz - 1;
5892
5893         if (upper_32_bits(start_address) == upper_32_bits(end_address))
5894                 return 1;
5895         else
5896                 return 0;
5897 }
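
/*
 * Worked example (assumed addresses): an 8 KB pool based at DMA address
 * 0xfffff000 ends at 0x100000fff, so upper_32_bits() of start and end
 * differ (0 vs 1) and the helper returns 0; the same pool based at
 * 0x100000000 ends at 0x100001fff and the helper returns 1.
 */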
5898
5899 /**
5900  * _base_reduce_hba_queue_depth - Retry with reduced queue depth
5901  * @ioc: Adapter object
5902  *
5903  * Return: 0 for success, non-zero for failure.
5904  */
5905 static inline int
5906 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5907 {
5908         int reduce_sz = 64;
5909
5910         if ((ioc->hba_queue_depth - reduce_sz) >
5911             (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5912                 ioc->hba_queue_depth -= reduce_sz;
5913                 return 0;
5914         } else
5915                 return -ENOMEM;
5916 }
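
/*
 * Example (illustrative numbers): with internal_depth = 10, a
 * hba_queue_depth of 1024 is retried at 960, 896, 832, ...; the helper
 * returns -ENOMEM as soon as the reduced depth would no longer exceed
 * internal_depth + INTERNAL_SCSIIO_CMDS_COUNT.
 */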
5917
5918 /**
5919  * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
5920  *                      for pcie sgl pools.
5921  * @ioc: Adapter object
5922  * @sz: DMA Pool size
5923  *
5924  * Return: 0 for success, non-zero for failure.
5925  */
5927 static int
5928 _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5929 {
5930         int i = 0, j = 0;
5931         struct chain_tracker *ct;
5932
5933         ioc->pcie_sgl_dma_pool =
5934             dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
5935             ioc->page_size, 0);
5936         if (!ioc->pcie_sgl_dma_pool) {
5937                 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5938                 return -ENOMEM;
5939         }
5940
5941         ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5942         ioc->chains_per_prp_buffer =
5943             min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
5944         for (i = 0; i < ioc->scsiio_depth; i++) {
5945                 ioc->pcie_sg_lookup[i].pcie_sgl =
5946                     dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5947                     &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5948                 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5949                         ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5950                         return -EAGAIN;
5951                 }
5952
5953                 if (!mpt3sas_check_same_4gb_region(
5954                     ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
5955                         ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
5956                             ioc->pcie_sg_lookup[i].pcie_sgl,
5957                             (unsigned long long)
5958                             ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5959                         ioc->use_32bit_dma = true;
5960                         return -EAGAIN;
5961                 }
5962
5963                 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5964                         ct = &ioc->chain_lookup[i].chains_per_smid[j];
5965                         ct->chain_buffer =
5966                             ioc->pcie_sg_lookup[i].pcie_sgl +
5967                             (j * ioc->chain_segment_sz);
5968                         ct->chain_buffer_dma =
5969                             ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5970                             (j * ioc->chain_segment_sz);
5971                 }
5972         }
5973         dinitprintk(ioc, ioc_info(ioc,
5974             "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5975             ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
5976         dinitprintk(ioc, ioc_info(ioc,
5977             "Number of chains can fit in a PRP page(%d)\n",
5978             ioc->chains_per_prp_buffer));
5979         return 0;
5980 }
5981
5982 /**
5983  * _base_allocate_chain_dma_pool - Allocating DMA'able memory
5984  *                      for chain dma pool.
5985  * @ioc: Adapter object
5986  * @sz: DMA Pool size
5987  *
5988  * Return: 0 for success, non-zero for failure.
5989  */
5990 static int
5991 _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5992 {
5993         int i = 0, j = 0;
5994         struct chain_tracker *ctr;
5995
5996         ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5997             ioc->chain_segment_sz, 16, 0);
5998         if (!ioc->chain_dma_pool)
5999                 return -ENOMEM;
6000
6001         for (i = 0; i < ioc->scsiio_depth; i++) {
6002                 for (j = ioc->chains_per_prp_buffer;
6003                     j < ioc->chains_needed_per_io; j++) {
6004                         ctr = &ioc->chain_lookup[i].chains_per_smid[j];
6005                         ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
6006                             GFP_KERNEL, &ctr->chain_buffer_dma);
6007                         if (!ctr->chain_buffer)
6008                                 return -EAGAIN;
6009                         if (!mpt3sas_check_same_4gb_region(
6010                             ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
6011                                 ioc_err(ioc,
6012                                     "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
6013                                     ctr->chain_buffer,
6014                                     (unsigned long long)ctr->chain_buffer_dma);
6015                                 ioc->use_32bit_dma = true;
6016                                 return -EAGAIN;
6017                         }
6018                 }
6019         }
6020         dinitprintk(ioc, ioc_info(ioc,
6021             "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
6022             ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
6023             (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
6024             ioc->chain_segment_sz))/1024));
6025         return 0;
6026 }
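
/*
 * Note the split with _base_allocate_pcie_sgl_pool() above: when NVMe
 * devices are supported, chain buffers 0 .. chains_per_prp_buffer - 1 of
 * each SMID are carved out of the NVMe PRP pages, and only the remainder
 * up to chains_needed_per_io comes from the chain pool allocated here.
 */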
6027
6028 /**
6029  * _base_allocate_sense_dma_pool - Allocating DMA'able memory
6030  *                      for sense dma pool.
6031  * @ioc: Adapter object
6032  * @sz: DMA Pool size
6033  * Return: 0 for success, non-zero for failure.
6034  */
6035 static int
6036 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6037 {
6038         ioc->sense_dma_pool =
6039             dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
6040         if (!ioc->sense_dma_pool)
6041                 return -ENOMEM;
6042         ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
6043             GFP_KERNEL, &ioc->sense_dma);
6044         if (!ioc->sense)
6045                 return -EAGAIN;
6046         if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
6047                 dinitprintk(ioc, pr_err(
6048                     "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
6049                     ioc->sense, (unsigned long long) ioc->sense_dma));
6050                 ioc->use_32bit_dma = true;
6051                 return -EAGAIN;
6052         }
6053         ioc_info(ioc,
6054             "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
6055             ioc->sense, (unsigned long long)ioc->sense_dma,
6056             ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
6057         return 0;
6058 }
6059
6060 /**
6061  * _base_allocate_reply_pool - Allocating DMA'able memory
6062  *                      for reply pool.
6063  * @ioc: Adapter object
6064  * @sz: DMA Pool size
6065  * Return: 0 for success, non-zero for failure.
6066  */
6067 static int
6068 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6069 {
6070         /* reply pool, 4 byte align */
6071         ioc->reply_dma_pool = dma_pool_create("reply pool",
6072             &ioc->pdev->dev, sz, 4, 0);
6073         if (!ioc->reply_dma_pool)
6074                 return -ENOMEM;
6075         ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
6076             &ioc->reply_dma);
6077         if (!ioc->reply)
6078                 return -EAGAIN;
6079         if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
6080                 dinitprintk(ioc, pr_err(
6081                     "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
6082                     ioc->reply, (unsigned long long) ioc->reply_dma));
6083                 ioc->use_32bit_dma = true;
6084                 return -EAGAIN;
6085         }
6086         ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
6087         ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
6088         ioc_info(ioc,
6089             "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
6090             ioc->reply, (unsigned long long)ioc->reply_dma,
6091             ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
6092         return 0;
6093 }
6094
6095 /**
6096  * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
6097  *                      for reply free dma pool.
6098  * @ioc: Adapter object
6099  * @sz: DMA Pool size
6100  * Return: 0 for success, non-zero for failure.
6101  */
6102 static int
6103 _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6104 {
6105         /* reply free queue, 16 byte align */
6106         ioc->reply_free_dma_pool = dma_pool_create(
6107             "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
6108         if (!ioc->reply_free_dma_pool)
6109                 return -ENOMEM;
6110         ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
6111             GFP_KERNEL, &ioc->reply_free_dma);
6112         if (!ioc->reply_free)
6113                 return -EAGAIN;
6114         if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
6115                 dinitprintk(ioc,
6116                     pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
6117                     ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
6118                 ioc->use_32bit_dma = true;
6119                 return -EAGAIN;
6120         }
6121         memset(ioc->reply_free, 0, sz);
6122         dinitprintk(ioc, ioc_info(ioc,
6123             "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
6124             ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
6125         dinitprintk(ioc, ioc_info(ioc,
6126             "reply_free_dma (0x%llx)\n",
6127             (unsigned long long)ioc->reply_free_dma));
6128         return 0;
6129 }
6130
6131 /**
6132  * _base_allocate_reply_post_free_array - Allocating DMA'able memory
6133  *                      for reply post free array.
6134  * @ioc: Adapter object
6135  * @reply_post_free_array_sz: DMA Pool size
6136  * Return: 0 for success, non-zero for failure.
6137  */
6139 static int
6140 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
6141         u32 reply_post_free_array_sz)
6142 {
6143         ioc->reply_post_free_array_dma_pool =
6144             dma_pool_create("reply_post_free_array pool",
6145             &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
6146         if (!ioc->reply_post_free_array_dma_pool)
6147                 return -ENOMEM;
6148         ioc->reply_post_free_array =
6149             dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
6150             GFP_KERNEL, &ioc->reply_post_free_array_dma);
6151         if (!ioc->reply_post_free_array)
6152                 return -EAGAIN;
6153         if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
6154             reply_post_free_array_sz)) {
6155                 dinitprintk(ioc, pr_err(
6156                     "Bad Reply Post Free Array! Array (0x%p) Array dma = (0x%llx)\n",
6157                     ioc->reply_post_free_array,
6158                     (unsigned long long) ioc->reply_post_free_array_dma));
6159                 ioc->use_32bit_dma = true;
6160                 return -EAGAIN;
6161         }
6162         return 0;
6163 }
6164 /**
6165  * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
6166  *                     for reply queues.
6167  * @ioc: per adapter object
6168  * @sz: DMA Pool size
6169  * Return: 0 for success, non-zero for failure.
6170  */
6171 static int
6172 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
6173 {
6174         int i = 0;
6175         u32 dma_alloc_count = 0;
6176         int reply_post_free_sz = ioc->reply_post_queue_depth *
6177                 sizeof(Mpi2DefaultReplyDescriptor_t);
6178         int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
6179
6180         ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
6181                         GFP_KERNEL);
6182         if (!ioc->reply_post)
6183                 return -ENOMEM;
6184         /*
6185          *  For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and
6186          *  for VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..)
6187          *  should be within a 4GB boundary, i.e. reply queues in a set must
6188          *  have the same upper 32 bits in their memory address. So here the
6189          *  driver allocates the DMA'able memory for reply queues accordingly.
6190          *  The driver uses the VENTURA_SERIES limitation to manage
6191          *  INVADER_SERIES as well.
6192          */
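        /*
         * Worked example (assuming RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16):
         * for count = 24 reply queues, dma_alloc_count becomes
         * DIV_ROUND_UP(24, 16) = 2, so queues 0-15 are carved out of the
         * first pool chunk at reply_post_free_sz offsets and queues
         * 16-23 out of the second.
         */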
6193         dma_alloc_count = DIV_ROUND_UP(count,
6194                                 RDPQ_MAX_INDEX_IN_ONE_CHUNK);
6195         ioc->reply_post_free_dma_pool =
6196                 dma_pool_create("reply_post_free pool",
6197                     &ioc->pdev->dev, sz, 16, 0);
6198         if (!ioc->reply_post_free_dma_pool)
6199                 return -ENOMEM;
6200         for (i = 0; i < count; i++) {
6201                 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
6202                         ioc->reply_post[i].reply_post_free =
6203                             dma_pool_zalloc(ioc->reply_post_free_dma_pool,
6204                                 GFP_KERNEL,
6205                                 &ioc->reply_post[i].reply_post_free_dma);
6206                         if (!ioc->reply_post[i].reply_post_free)
6207                                 return -ENOMEM;
6208                         /*
6209                          * Each RDPQ pool chunk must satisfy the 4GB boundary
6210                          * restriction:
6211                          * 1) Check if the resources allocated for the RDPQ
6212                          *      pool are in the same 4GB range.
6213                          * 2) If #1 is true, continue with 64 bit DMA.
6214                          * 3) If #1 is false, return -EAGAIN: the caller frees
6215                          * all resources, sets a 32 bit DMA mask and retries.
6216                          */
6217                         if (!mpt3sas_check_same_4gb_region(
6218                                 ioc->reply_post[i].reply_post_free_dma, sz)) {
6219                                 dinitprintk(ioc,
6220                                     ioc_err(ioc, "bad Replypost free pool(0x%p) "
6221                                     "reply_post_free_dma = (0x%llx)\n",
6222                                     ioc->reply_post[i].reply_post_free,
6223                                     (unsigned long long)
6224                                     ioc->reply_post[i].reply_post_free_dma));
6225                                 return -EAGAIN;
6226                         }
6227                         dma_alloc_count--;
6228
6229                 } else {
6230                         ioc->reply_post[i].reply_post_free =
6231                             (Mpi2ReplyDescriptorsUnion_t *)
6232                             ((long)ioc->reply_post[i-1].reply_post_free
6233                             + reply_post_free_sz);
6234                         ioc->reply_post[i].reply_post_free_dma =
6235                             (dma_addr_t)
6236                             (ioc->reply_post[i-1].reply_post_free_dma +
6237                             reply_post_free_sz);
6238                 }
6239         }
6240         return 0;
6241 }
6242
6243 /**
6244  * _base_allocate_memory_pools - allocate start of day memory pools
6245  * @ioc: per adapter object
6246  *
6247  * Return: 0 success, anything else error.
6248  */
6249 static int
6250 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
6251 {
6252         struct mpt3sas_facts *facts;
6253         u16 max_sge_elements;
6254         u16 chains_needed_per_io;
6255         u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
6256         u32 retry_sz;
6257         u32 rdpq_sz = 0, sense_sz = 0;
6258         u16 max_request_credit, nvme_blocks_needed;
6259         unsigned short sg_tablesize;
6260         u16 sge_size;
6261         int i;
6262         int ret = 0, rc = 0;
6263
6264         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6265
6266
6267         retry_sz = 0;
6268         facts = &ioc->facts;
6269
6270         /* command line tunables for max sgl entries */
6271         if (max_sgl_entries != -1)
6272                 sg_tablesize = max_sgl_entries;
6273         else {
6274                 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
6275                         sg_tablesize = MPT2SAS_SG_DEPTH;
6276                 else
6277                         sg_tablesize = MPT3SAS_SG_DEPTH;
6278         }
6279
6280         /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
6281         if (reset_devices)
6282                 sg_tablesize = min_t(unsigned short, sg_tablesize,
6283                    MPT_KDUMP_MIN_PHYS_SEGMENTS);
6284
6285         if (ioc->is_mcpu_endpoint)
6286                 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6287         else {
6288                 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
6289                         sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6290                 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
6291                         sg_tablesize = min_t(unsigned short, sg_tablesize,
6292                                         SG_MAX_SEGMENTS);
6293                         ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined MPT_MAX_PHYS_SEGMENTS(%u)\n",
6294                                  sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
6295                 }
6296                 ioc->shost->sg_tablesize = sg_tablesize;
6297         }
6298
6299         ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
6300                 (facts->RequestCredit / 4));
6301         if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
6302                 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
6303                                 INTERNAL_SCSIIO_CMDS_COUNT)) {
6304                         ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
6305                                 facts->RequestCredit);
6306                         return -ENOMEM;
6307                 }
6308                 ioc->internal_depth = 10;
6309         }
6310
6311         ioc->hi_priority_depth = ioc->internal_depth - (5);
6312         /* command line tunables  for max controller queue depth */
6313         if (max_queue_depth != -1 && max_queue_depth != 0) {
6314                 max_request_credit = min_t(u16, max_queue_depth +
6315                         ioc->internal_depth, facts->RequestCredit);
6316                 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
6317                         max_request_credit =  MAX_HBA_QUEUE_DEPTH;
6318         } else if (reset_devices)
6319                 max_request_credit = min_t(u16, facts->RequestCredit,
6320                     (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
6321         else
6322                 max_request_credit = min_t(u16, facts->RequestCredit,
6323                     MAX_HBA_QUEUE_DEPTH);
6324
6325         /* Firmware maintains an additional facts->HighPriorityCredit number
6326          * of credits for HiPriority Request messages, so the hba queue depth
6327          * is the sum of max_request_credit and the high priority queue depth.
6328          */
6329         ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
6330
6331         /* request frame size */
6332         ioc->request_sz = facts->IOCRequestFrameSize * 4;
6333
6334         /* reply frame size */
6335         ioc->reply_sz = facts->ReplyFrameSize * 4;
6336
6337         /* chain segment size */
6338         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6339                 if (facts->IOCMaxChainSegmentSize)
6340                         ioc->chain_segment_sz =
6341                                         facts->IOCMaxChainSegmentSize *
6342                                         MAX_CHAIN_ELEMT_SZ;
6343                 else
6344                 /* set to 128 bytes if IOCMaxChainSegmentSize is zero */
6345                         ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
6346                                                     MAX_CHAIN_ELEMT_SZ;
6347         } else
6348                 ioc->chain_segment_sz = ioc->request_sz;
6349
6350         /* calculate the max scatter element size */
6351         sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
6352
6353  retry_allocation:
6354         total_sz = 0;
6355         /* calculate number of sg elements left over in the 1st frame */
6356         max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
6357             sizeof(Mpi2SGEIOUnion_t)) + sge_size);
6358         ioc->max_sges_in_main_message = max_sge_elements/sge_size;
6359
6360         /* now do the same for a chain buffer */
6361         max_sge_elements = ioc->chain_segment_sz - sge_size;
6362         ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
6363
6364         /*
6365          *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
6366          */
6367         chains_needed_per_io = ((ioc->shost->sg_tablesize -
6368            ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
6369             + 1;
6370         if (chains_needed_per_io > facts->MaxChainDepth) {
6371                 chains_needed_per_io = facts->MaxChainDepth;
6372                 ioc->shost->sg_tablesize = min_t(u16,
6373                 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
6374                 * chains_needed_per_io), ioc->shost->sg_tablesize);
6375         }
6376         ioc->chains_needed_per_io = chains_needed_per_io;
6377
6378         /* reply free queue sizing - taking 64 FW events into account */
6379         ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6380
6381         /* the mCPU endpoint manages a single counter for simplicity */
6382         if (ioc->is_mcpu_endpoint)
6383                 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
6384         else {
6385                 /* calculate reply descriptor post queue depth */
6386                 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
6387                         ioc->reply_free_queue_depth +  1;
6388                 /* align the reply post queue on the next 16 count boundary */
6389                 if (ioc->reply_post_queue_depth % 16)
6390                         ioc->reply_post_queue_depth += 16 -
6391                                 (ioc->reply_post_queue_depth % 16);
6392         }
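
        /*
         * Worked example (illustrative numbers): hba_queue_depth = 500
         * gives reply_free_queue_depth = 564 and a raw post queue depth
         * of 500 + 564 + 1 = 1065; 1065 % 16 = 9, so the depth is
         * rounded up by 16 - 9 = 7 to 1072.
         */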
6393
6394         if (ioc->reply_post_queue_depth >
6395             facts->MaxReplyDescriptorPostQueueDepth) {
6396                 ioc->reply_post_queue_depth =
6397                                 facts->MaxReplyDescriptorPostQueueDepth -
6398                     (facts->MaxReplyDescriptorPostQueueDepth % 16);
6399                 ioc->hba_queue_depth =
6400                                 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
6401                 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6402         }
6403
6404         ioc_info(ioc,
6405             "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
6406             "sge_per_io(%d), chains_per_io(%d)\n",
6407             ioc->max_sges_in_main_message,
6408             ioc->max_sges_in_chain_message,
6409             ioc->shost->sg_tablesize,
6410             ioc->chains_needed_per_io);
6411
6412         /* reply post queue, 16 byte align */
6413         reply_post_free_sz = ioc->reply_post_queue_depth *
6414             sizeof(Mpi2DefaultReplyDescriptor_t);
6415         rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
6416         if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
6417             || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
6418                 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
6419         ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6420         if (ret == -EAGAIN) {
6421                 /*
6422                  * Free allocated bad RDPQ memory pools.
6423                  * Change dma coherent mask to 32 bit and reallocate RDPQ
6424                  */
6425                 _base_release_memory_pools(ioc);
6426                 ioc->use_32bit_dma = true;
6427                 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6428                         ioc_err(ioc,
6429                             "32 bit DMA mask failed %s\n", pci_name(ioc->pdev));
6430                         return -ENODEV;
6431                 }
6432                 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6433                         return -ENOMEM;
6434         } else if (ret == -ENOMEM)
6435                 return -ENOMEM;
6436         total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
6437             DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
6438         ioc->scsiio_depth = ioc->hba_queue_depth -
6439             ioc->hi_priority_depth - ioc->internal_depth;
6440
6441         /* set the scsi host can_queue depth
6442          * with some internal commands that could be outstanding
6443          */
6444         ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
6445         dinitprintk(ioc,
6446                     ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
6447                              ioc->shost->can_queue));
6448
6449         /* contiguous pool for request and chains, 16 byte align, one extra
6450          * frame for smid=0
6451          */
6452         ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
6453         sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
6454
6455         /* hi-priority queue */
6456         sz += (ioc->hi_priority_depth * ioc->request_sz);
6457
6458         /* internal queue */
6459         sz += (ioc->internal_depth * ioc->request_sz);
6460
6461         ioc->request_dma_sz = sz;
6462         ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
6463                         &ioc->request_dma, GFP_KERNEL);
6464         if (!ioc->request) {
6465                 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6466                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
6467                         ioc->request_sz, sz / 1024);
6468                 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
6469                         goto out;
6470                 retry_sz = 64;
6471                 ioc->hba_queue_depth -= retry_sz;
6472                 _base_release_memory_pools(ioc);
6473                 goto retry_allocation;
6474         }
6475
6476         if (retry_sz)
6477                 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
6478                         ioc->hba_queue_depth, ioc->chains_needed_per_io,
6479                         ioc->request_sz, sz / 1024);
6480
6481         /* hi-priority queue */
6482         ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
6483             ioc->request_sz);
6484         ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
6485             ioc->request_sz);
6486
6487         /* internal queue */
6488         ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
6489             ioc->request_sz);
6490         ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
6491             ioc->request_sz);
6492
6493         ioc_info(ioc,
6494             "request pool(0x%p) - dma(0x%llx): "
6495             "depth(%d), frame_size(%d), pool_size(%d kB)\n",
6496             ioc->request, (unsigned long long) ioc->request_dma,
6497             ioc->hba_queue_depth, ioc->request_sz,
6498             (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6499
6500         total_sz += sz;
6501
6502         dinitprintk(ioc,
6503                     ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6504                              ioc->request, ioc->scsiio_depth));
6505
6506         ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6507         sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6508         ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6509         if (!ioc->chain_lookup) {
6510                 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6511                 goto out;
6512         }
6513
6514         sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6515         for (i = 0; i < ioc->scsiio_depth; i++) {
6516                 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6517                 if (!ioc->chain_lookup[i].chains_per_smid) {
6518                         ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6519                         goto out;
6520                 }
6521         }
6522
6523         /* initialize hi-priority queue smid's */
6524         ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6525             sizeof(struct request_tracker), GFP_KERNEL);
6526         if (!ioc->hpr_lookup) {
6527                 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6528                 goto out;
6529         }
6530         ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6531         dinitprintk(ioc,
6532                     ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6533                              ioc->hi_priority,
6534                              ioc->hi_priority_depth, ioc->hi_priority_smid));
6535
6536         /* initialize internal queue smid's */
6537         ioc->internal_lookup = kcalloc(ioc->internal_depth,
6538             sizeof(struct request_tracker), GFP_KERNEL);
6539         if (!ioc->internal_lookup) {
6540                 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6541                 goto out;
6542         }
6543         ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6544         dinitprintk(ioc,
6545                     ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6546                              ioc->internal,
6547                              ioc->internal_depth, ioc->internal_smid));
6548
6549         ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6550             sizeof(u16), GFP_KERNEL);
6551         if (!ioc->io_queue_num)
6552                 goto out;
6553         /*
6554          * The number of NVMe page sized blocks needed is:
6555          *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
6556          * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
6557          * that is placed in the main message frame.  8 is the size of each PRP
6558          * entry or PRP list pointer entry.  8 is subtracted from page_size
6559          * because of the PRP list pointer entry at the end of a page, so this
6560          * is not counted as a PRP entry.  The 1 added page is a round up.
6561          *
6562          * To avoid allocation failures due to the amount of memory that could
6563          * be required for NVMe PRP's, only each set of NVMe blocks will be
6564          * contiguous, so a new set is allocated for each possible I/O.
6565          */
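        /*
         * Worked example (assuming a 4 KB IOC page): sg_tablesize = 1024
         * gives (((1024 * 8) - 1) / (4096 - 8)) + 1 = (8191 / 4088) + 1
         * = 3 NVMe page sized blocks per I/O, while sg_tablesize = 128
         * needs only (1023 / 4088) + 1 = 1 block.
         */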
6566
6567         ioc->chains_per_prp_buffer = 0;
6568         if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6569                 nvme_blocks_needed =
6570                         (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6571                 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6572                 nvme_blocks_needed++;
6573
6574                 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6575                 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6576                 if (!ioc->pcie_sg_lookup) {
6577                         ioc_err(ioc, "PCIe SGL lookup: kzalloc failed\n");
6578                         goto out;
6579                 }
6580                 sz = nvme_blocks_needed * ioc->page_size;
6581                 rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6582                 if (rc == -ENOMEM)
6583                         return -ENOMEM;
6584                 else if (rc == -EAGAIN)
6585                         goto try_32bit_dma;
6586                 total_sz += sz * ioc->scsiio_depth;
6587         }
6588
6589         rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6590         if (rc == -ENOMEM)
6591                 return -ENOMEM;
6592         else if (rc == -EAGAIN)
6593                 goto try_32bit_dma;
6594         total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6595                 ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6596         dinitprintk(ioc,
6597             ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6598             ioc->chain_depth, ioc->chain_segment_sz,
6599             (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6600         /* sense buffers, 4 byte align */
6601         sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6602         rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6603         if (rc  == -ENOMEM)
6604                 return -ENOMEM;
6605         else if (rc == -EAGAIN)
6606                 goto try_32bit_dma;
6607         total_sz += sense_sz;
6608         /* reply pool, 4 byte align */
6609         sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6610         rc = _base_allocate_reply_pool(ioc, sz);
6611         if (rc == -ENOMEM)
6612                 return -ENOMEM;
6613         else if (rc == -EAGAIN)
6614                 goto try_32bit_dma;
6615         total_sz += sz;
6616
6617         /* reply free queue, 16 byte align */
6618         sz = ioc->reply_free_queue_depth * 4;
6619         rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6620         if (rc  == -ENOMEM)
6621                 return -ENOMEM;
6622         else if (rc == -EAGAIN)
6623                 goto try_32bit_dma;
6624         dinitprintk(ioc,
6625                     ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6626                              (unsigned long long)ioc->reply_free_dma));
6627         total_sz += sz;
6628         if (ioc->rdpq_array_enable) {
6629                 reply_post_free_array_sz = ioc->reply_queue_count *
6630                     sizeof(Mpi2IOCInitRDPQArrayEntry);
6631                 rc = _base_allocate_reply_post_free_array(ioc,
6632                     reply_post_free_array_sz);
6633                 if (rc == -ENOMEM)
6634                         return -ENOMEM;
6635                 else if (rc == -EAGAIN)
6636                         goto try_32bit_dma;
6637         }
6638         ioc->config_page_sz = 512;
6639         ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6640                         ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6641         if (!ioc->config_page) {
6642                 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
6643                 goto out;
6644         }
6645
6646         ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6647             ioc->config_page, (unsigned long long)ioc->config_page_dma,
6648             ioc->config_page_sz);
6649         total_sz += ioc->config_page_sz;
6650
6651         ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6652                  total_sz / 1024);
6653         ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
6654                  ioc->shost->can_queue, facts->RequestCredit);
6655         ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6656                  ioc->shost->sg_tablesize);
6657         return 0;
6658
6659 try_32bit_dma:
6660         _base_release_memory_pools(ioc);
6661         if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6662                 /* Change dma coherent mask to 32 bit and reallocate */
6663                 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6664                         pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
6665                             pci_name(ioc->pdev));
6666                         return -ENODEV;
6667                 }
6668         } else if (_base_reduce_hba_queue_depth(ioc) != 0)
6669                 return -ENOMEM;
6670         goto retry_allocation;
6671
6672  out:
6673         return -ENOMEM;
6674 }
6675
6676 /**
6677  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
6678  * @ioc: Pointer to MPT3SAS_ADAPTER structure
6679  * @cooked: Request raw or cooked IOC state
6680  *
6681  * Return: all IOC Doorbell register bits if cooked==0, else just the
6682  * Doorbell bits in MPI2_IOC_STATE_MASK.
6683  */
6684 u32
6685 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6686 {
6687         u32 s, sc;
6688
6689         s = ioc->base_readl(&ioc->chip->Doorbell);
6690         sc = s & MPI2_IOC_STATE_MASK;
6691         return cooked ? sc : s;
6692 }
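
/*
 * Example usage (illustrative, mirroring _base_wait_on_iocstate below):
 * callers normally poll the cooked state, e.g.
 *
 *	if (mpt3sas_base_get_iocstate(ioc, 1) == MPI2_IOC_STATE_FAULT)
 *		... the IOC took a fault and needs recovery ...
 */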
6693
6694 /**
6695  * _base_wait_on_iocstate - waiting on a particular ioc state
6696  * @ioc: per adapter object
6697  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
6698  * @timeout: timeout in seconds
6699  *
6700  * Return: 0 for success, non-zero for failure.
6701  */
6702 static int
6703 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6704 {
6705         u32 count, cntdn;
6706         u32 current_state;
6707
6708         count = 0;
6709         cntdn = 1000 * timeout;
6710         do {
6711                 current_state = mpt3sas_base_get_iocstate(ioc, 1);
6712                 if (current_state == ioc_state)
6713                         return 0;
6714                 if (count && current_state == MPI2_IOC_STATE_FAULT)
6715                         break;
6716                 if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6717                         break;
6718
6719                 usleep_range(1000, 1500);
6720                 count++;
6721         } while (--cntdn);
6722
6723         return current_state;
6724 }
6725
6726 /**
6727  * _base_dump_reg_set - print a hexdump of the system interface register set.
6728  * @ioc: per adapter object
6729  *
6730  * Return: nothing.
6731  */
6732 static inline void
6733 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6734 {
6735         unsigned int i, sz = 256;
6736         u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6737
6738         ioc_info(ioc, "System Register set:\n");
6739         for (i = 0; i < (sz / sizeof(u32)); i++)
6740                 pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
6741 }
6742
6743 /**
6744  * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
6745  * by a write to the doorbell)
6746  * @ioc: per adapter object
6747  * @timeout: timeout in seconds
6748  *
6749  * Return: 0 for success, non-zero for failure.
6750  *
6751  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
6752  */
6753
6754 static int
6755 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6756 {
6757         u32 cntdn, count;
6758         u32 int_status;
6759
6760         count = 0;
6761         cntdn = 1000 * timeout;
6762         do {
6763                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6764                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6765                         dhsprintk(ioc,
6766                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6767                                            __func__, count, timeout));
6768                         return 0;
6769                 }
6770
6771                 usleep_range(1000, 1500);
6772                 count++;
6773         } while (--cntdn);
6774
6775         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6776                 __func__, count, int_status);
6777         return -EFAULT;
6778 }
6779
6780 static int
6781 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6782 {
6783         u32 cntdn, count;
6784         u32 int_status;
6785
6786         count = 0;
6787         cntdn = 2000 * timeout;
6788         do {
6789                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6790                 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6791                         dhsprintk(ioc,
6792                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6793                                            __func__, count, timeout));
6794                         return 0;
6795                 }
6796
6797                 udelay(500);
6798                 count++;
6799         } while (--cntdn);
6800
6801         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6802                 __func__, count, int_status);
6803         return -EFAULT;
6804
6805 }
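/*
 * Editorial note, not part of the driver: both pollers above bound their
 * loops with an iteration countdown rather than a deadline.
 * _base_wait_for_doorbell_int() runs up to 1000 * timeout iterations of a
 * ~1 ms sleep (process context), while _base_spin_on_doorbell_int() runs
 * up to 2000 * timeout iterations of udelay(500) -- the same overall
 * budget of roughly `timeout' seconds at finer granularity, for contexts
 * where sleeping is not desirable.
 */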
6806
6807 /**
6808  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
6809  * @ioc: per adapter object
6810  * @timeout: timeout in seconds
6811  *
6812  * Return: 0 for success, non-zero for failure.
6813  *
6814  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
6815  * doorbell.
6816  */
6817 static int
6818 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6819 {
6820         u32 cntdn, count;
6821         u32 int_status;
6822         u32 doorbell;
6823
6824         count = 0;
6825         cntdn = 1000 * timeout;
6826         do {
6827                 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6828                 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
6829                         dhsprintk(ioc,
6830                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6831                                            __func__, count, timeout));
6832                         return 0;
6833                 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6834                         doorbell = ioc->base_readl(&ioc->chip->Doorbell);
6835                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
6836                             MPI2_IOC_STATE_FAULT) {
6837                                 mpt3sas_print_fault_code(ioc, doorbell);
6838                                 return -EFAULT;
6839                         }
6840                         if ((doorbell & MPI2_IOC_STATE_MASK) ==
6841                             MPI2_IOC_STATE_COREDUMP) {
6842                                 mpt3sas_print_coredump_info(ioc, doorbell);
6843                                 return -EFAULT;
6844                         }
6845                 } else if (int_status == 0xFFFFFFFF)
6846                         goto out;
6847
6848                 usleep_range(1000, 1500);
6849                 count++;
6850         } while (--cntdn);
6851
6852  out:
6853         ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6854                 __func__, count, int_status);
6855         return -EFAULT;
6856 }
6857
6858 /**
6859  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
6860  * @ioc: per adapter object
6861  * @timeout: timeout in seconds
6862  *
6863  * Return: 0 for success, non-zero for failure.
6864  */
6865 static int
6866 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6867 {
6868         u32 cntdn, count;
6869         u32 doorbell_reg;
6870
6871         count = 0;
6872         cntdn = 1000 * timeout;
6873         do {
6874                 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
6875                 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6876                         dhsprintk(ioc,
6877                                   ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6878                                            __func__, count, timeout));
6879                         return 0;
6880                 }
6881
6882                 usleep_range(1000, 1500);
6883                 count++;
6884         } while (--cntdn);
6885
6886         ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6887                 __func__, count, doorbell_reg);
6888         return -EFAULT;
6889 }
6890
6891 /**
6892  * _base_send_ioc_reset - send doorbell reset
6893  * @ioc: per adapter object
6894  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
6895  * @timeout: timeout in seconds
6896  *
6897  * Return: 0 for success, non-zero for failure.
6898  */
6899 static int
6900 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6901 {
6902         u32 ioc_state;
6903         int r = 0;
6904         unsigned long flags;
6905
6906         if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
6907                 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6908                 return -EFAULT;
6909         }
6910
6911         if (!(ioc->facts.IOCCapabilities &
6912            MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
6913                 return -EFAULT;
6914
6915         ioc_info(ioc, "sending message unit reset !!\n");
6916
6917         writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
6918             &ioc->chip->Doorbell);
6919         if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6920                 r = -EFAULT;
6921                 goto out;
6922         }
6923
6924         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6925         if (ioc_state) {
6926                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6927                         __func__, ioc_state);
6928                 r = -EFAULT;
6929                 goto out;
6930         }
6931  out:
6932         if (r != 0) {
6933                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6934                 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6935                 /*
6936                  * Wait for IOC state CoreDump to clear only during
6937                  * HBA initialization & release time.
6938                  */
6939                 if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6940                     MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6941                     ioc->fault_reset_work_q == NULL)) {
6942                         spin_unlock_irqrestore(
6943                             &ioc->ioc_reset_in_progress_lock, flags);
6944                         mpt3sas_print_coredump_info(ioc, ioc_state);
6945                         mpt3sas_base_wait_for_coredump_completion(ioc,
6946                             __func__);
6947                         spin_lock_irqsave(
6948                             &ioc->ioc_reset_in_progress_lock, flags);
6949                 }
6950                 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6951         }
6952         ioc_info(ioc, "message unit reset: %s\n",
6953                  r == 0 ? "SUCCESS" : "FAILED");
6954         return r;
6955 }
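/*
 * Illustrative sketch, not part of the driver: callers typically try the
 * doorbell message unit reset first and fall back to the "big hammer"
 * diag reset on failure, as mpt3sas_base_make_ioc_ready() does further
 * below.  The helper name is hypothetical; it relies on the forward
 * declaration of _base_diag_reset() earlier in this file.
 */
static int __maybe_unused
_example_soft_then_hard_reset(struct MPT3SAS_ADAPTER *ioc)
{
	/* message unit reset brings the IOC back to READY on success */
	if (!_base_send_ioc_reset(ioc,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))
		return 0;
	/* otherwise escalate to a full diagnostic reset */
	return _base_diag_reset(ioc);
}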
6956
6957 /**
6958  * mpt3sas_wait_for_ioc - wait for the IOC to become operational
6959  * @ioc: per adapter object
6960  * @timeout: timeout in seconds
6961  *
6962  * Waits up to @timeout seconds for the IOC to become operational.
6963  * Return: 0 if the IOC is present and operational; %-ETIME while the
6964  * driver is still loading; otherwise %-EFAULT.
6965  */
6966
6967 int
6968 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6969 {
6970         int wait_state_count = 0;
6971         u32 ioc_state;
6972
6973         do {
6974                 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6975                 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
6976                         break;
6977
6978                 /*
6979                  * The watchdog thread is started after IOC initialization,
6980                  * so there is no need to wait here for the IOC to become
6981                  * operational while initialization is in progress. Instead
6982                  * the driver returns -ETIME so that the calling function
6983                  * can issue a diag reset and retry the command.
6984                  */
6985                 if (ioc->is_driver_loading)
6986                         return -ETIME;
6987
6988                 ssleep(1);
6989                 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
6990                                 __func__, ++wait_state_count);
6991         } while (--timeout);
6992         if (!timeout) {
6993                 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
6994                 return -EFAULT;
6995         }
6996         if (wait_state_count)
6997                 ioc_info(ioc, "ioc is operational\n");
6998         return 0;
6999 }
7000
7001 /**
7002  * _base_handshake_req_reply_wait - send request through doorbell interface
7003  * @ioc: per adapter object
7004  * @request_bytes: request length
7005  * @request: pointer to the request payload
7006  * @reply_bytes: reply length
7007  * @reply: pointer to the reply payload
7008  * @timeout: timeout in seconds
7009  *
7010  * Return: 0 for success, non-zero for failure.
7011  */
7012 static int
7013 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
7014         u32 *request, int reply_bytes, u16 *reply, int timeout)
7015 {
7016         MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
7017         int i;
7018         u8 failed;
7019         __le32 *mfp;
7020
7021         /* make sure doorbell is not in use */
7022         if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
7023                 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
7024                 return -EFAULT;
7025         }
7026
7027         /* clear pending doorbell interrupts from previous state changes */
7028         if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
7029             MPI2_HIS_IOC2SYS_DB_STATUS)
7030                 writel(0, &ioc->chip->HostInterruptStatus);
7031
7032         /* send message to ioc */
7033         writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
7034             ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
7035             &ioc->chip->Doorbell);
7036
7037         if ((_base_spin_on_doorbell_int(ioc, 5))) {
7038                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7039                         __LINE__);
7040                 return -EFAULT;
7041         }
7042         writel(0, &ioc->chip->HostInterruptStatus);
7043
7044         if ((_base_wait_for_doorbell_ack(ioc, 5))) {
7045                 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
7046                         __LINE__);
7047                 return -EFAULT;
7048         }
7049
7050         /* send message 32-bits at a time */
7051         for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
7052                 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
7053                 if ((_base_wait_for_doorbell_ack(ioc, 5)))
7054                         failed = 1;
7055         }
7056
7057         if (failed) {
7058                 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
7059                         __LINE__);
7060                 return -EFAULT;
7061         }
7062
7063         /* now wait for the reply */
7064         if ((_base_wait_for_doorbell_int(ioc, timeout))) {
7065                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7066                         __LINE__);
7067                 return -EFAULT;
7068         }
7069
7070         /* read the first two 16-bit words; they include the total length of the reply */
7071         reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
7072             & MPI2_DOORBELL_DATA_MASK);
7073         writel(0, &ioc->chip->HostInterruptStatus);
7074         if ((_base_wait_for_doorbell_int(ioc, 5))) {
7075                 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7076                         __LINE__);
7077                 return -EFAULT;
7078         }
7079         reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
7080             & MPI2_DOORBELL_DATA_MASK);
7081         writel(0, &ioc->chip->HostInterruptStatus);
7082
7083         for (i = 2; i < default_reply->MsgLength * 2; i++) {
7084                 if ((_base_wait_for_doorbell_int(ioc, 5))) {
7085                         ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7086                                 __LINE__);
7087                         return -EFAULT;
7088                 }
7089                 if (i >= reply_bytes/2) /* overflow case */
7090                         ioc->base_readl(&ioc->chip->Doorbell);
7091                 else
7092                         reply[i] = le16_to_cpu(
7093                             ioc->base_readl(&ioc->chip->Doorbell)
7094                             & MPI2_DOORBELL_DATA_MASK);
7095                 writel(0, &ioc->chip->HostInterruptStatus);
7096         }
7097
7098         _base_wait_for_doorbell_int(ioc, 5);
7099         if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
7100                 dhsprintk(ioc,
7101                           ioc_info(ioc, "doorbell is in use (line=%d)\n",
7102                                    __LINE__));
7103         }
7104         writel(0, &ioc->chip->HostInterruptStatus);
7105
7106         if (ioc->logging_level & MPT_DEBUG_INIT) {
7107                 mfp = (__le32 *)reply;
7108                 pr_info("\toffset:data\n");
7109                 for (i = 0; i < reply_bytes/4; i++)
7110                         ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7111                             le32_to_cpu(mfp[i]));
7112         }
7113         return 0;
7114 }
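/*
 * Editorial sketch, not part of the driver: the handshake above starts by
 * packing the MPI function and the request length (in dwords) into one
 * 32-bit doorbell write, i.e.:
 *
 *	u32 db = (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
 *		 ((request_bytes / 4) << MPI2_DOORBELL_ADD_DWORDS_SHIFT);
 *	writel(db, &ioc->chip->Doorbell);
 *
 * after which each request dword is written to the same register, with a
 * doorbell-ack wait between writes.
 */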
7115
7116 /**
7117  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
7118  * @ioc: per adapter object
7119  * @mpi_reply: the reply payload from FW
7120  * @mpi_request: the request payload sent to FW
7121  *
7122  * The SAS IO Unit Control Request message allows the host to perform
7123  * low-level operations such as resets on the PHYs of the IO Unit, to
7124  * obtain the IOC-assigned device handle for a device when the host has
7125  * other identifying information about that device, and to remove IOC
7126  * resources associated with the device.
7127  *
7128  * Return: 0 for success, non-zero for failure.
7129  */
7130 int
7131 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
7132         Mpi2SasIoUnitControlReply_t *mpi_reply,
7133         Mpi2SasIoUnitControlRequest_t *mpi_request)
7134 {
7135         u16 smid;
7136         u8 issue_reset = 0;
7137         int rc;
7138         void *request;
7139
7140         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7141
7142         mutex_lock(&ioc->base_cmds.mutex);
7143
7144         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7145                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7146                 rc = -EAGAIN;
7147                 goto out;
7148         }
7149
7150         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7151         if (rc)
7152                 goto out;
7153
7154         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7155         if (!smid) {
7156                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7157                 rc = -EAGAIN;
7158                 goto out;
7159         }
7160
7161         rc = 0;
7162         ioc->base_cmds.status = MPT3_CMD_PENDING;
7163         request = mpt3sas_base_get_msg_frame(ioc, smid);
7164         ioc->base_cmds.smid = smid;
7165         memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
7166         if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7167             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
7168                 ioc->ioc_link_reset_in_progress = 1;
7169         init_completion(&ioc->base_cmds.done);
7170         ioc->put_smid_default(ioc, smid);
7171         wait_for_completion_timeout(&ioc->base_cmds.done,
7172             msecs_to_jiffies(10000));
7173         if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7174             mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
7175             ioc->ioc_link_reset_in_progress)
7176                 ioc->ioc_link_reset_in_progress = 0;
7177         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7178                 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
7179                     mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
7180                     issue_reset);
7181                 goto issue_host_reset;
7182         }
7183         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7184                 memcpy(mpi_reply, ioc->base_cmds.reply,
7185                     sizeof(Mpi2SasIoUnitControlReply_t));
7186         else
7187                 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
7188         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7189         goto out;
7190
7191  issue_host_reset:
7192         if (issue_reset)
7193                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7194         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7195         rc = -EFAULT;
7196  out:
7197         mutex_unlock(&ioc->base_cmds.mutex);
7198         return rc;
7199 }
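/*
 * Illustrative sketch, not part of the driver: a caller issuing a PHY
 * hard reset through the interface above might fill the request like
 * this.  The helper name _example_phy_hard_reset is hypothetical.
 */
static int __maybe_unused
_example_phy_hard_reset(struct MPT3SAS_ADAPTER *ioc, u8 phy_num)
{
	Mpi2SasIoUnitControlRequest_t mpi_request;
	Mpi2SasIoUnitControlReply_t mpi_reply;

	memset(&mpi_request, 0, sizeof(mpi_request));
	mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
	mpi_request.PhyNum = phy_num;
	return mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request);
}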
7200
7201 /**
7202  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
7203  * @ioc: per adapter object
7204  * @mpi_reply: the reply payload from FW
7205  * @mpi_request: the request payload sent to FW
7206  *
7207  * The SCSI Enclosure Processor request message causes the IOC to
7208  * communicate with SES devices to control LED status signals.
7209  *
7210  * Return: 0 for success, non-zero for failure.
7211  */
7212 int
7213 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
7214         Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
7215 {
7216         u16 smid;
7217         u8 issue_reset = 0;
7218         int rc;
7219         void *request;
7220
7221         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7222
7223         mutex_lock(&ioc->base_cmds.mutex);
7224
7225         if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7226                 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7227                 rc = -EAGAIN;
7228                 goto out;
7229         }
7230
7231         rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7232         if (rc)
7233                 goto out;
7234
7235         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7236         if (!smid) {
7237                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7238                 rc = -EAGAIN;
7239                 goto out;
7240         }
7241
7242         rc = 0;
7243         ioc->base_cmds.status = MPT3_CMD_PENDING;
7244         request = mpt3sas_base_get_msg_frame(ioc, smid);
7245         ioc->base_cmds.smid = smid;
7246         memset(request, 0, ioc->request_sz);
7247         memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
7248         init_completion(&ioc->base_cmds.done);
7249         ioc->put_smid_default(ioc, smid);
7250         wait_for_completion_timeout(&ioc->base_cmds.done,
7251             msecs_to_jiffies(10000));
7252         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7253                 mpt3sas_check_cmd_timeout(ioc,
7254                     ioc->base_cmds.status, mpi_request,
7255                     sizeof(Mpi2SepRequest_t)/4, issue_reset);
7256                 goto issue_host_reset;
7257         }
7258         if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7259                 memcpy(mpi_reply, ioc->base_cmds.reply,
7260                     sizeof(Mpi2SepReply_t));
7261         else
7262                 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
7263         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7264         goto out;
7265
7266  issue_host_reset:
7267         if (issue_reset)
7268                 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7269         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7270         rc = -EFAULT;
7271  out:
7272         mutex_unlock(&ioc->base_cmds.mutex);
7273         return rc;
7274 }
7275
7276 /**
7277  * _base_get_port_facts - obtain port facts reply and save in ioc
7278  * @ioc: per adapter object
7279  * @port: port number
7280  *
7281  * Return: 0 for success, non-zero for failure.
7282  */
7283 static int
7284 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
7285 {
7286         Mpi2PortFactsRequest_t mpi_request;
7287         Mpi2PortFactsReply_t mpi_reply;
7288         struct mpt3sas_port_facts *pfacts;
7289         int mpi_reply_sz, mpi_request_sz, r;
7290
7291         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7292
7293         mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
7294         mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
7295         memset(&mpi_request, 0, mpi_request_sz);
7296         mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
7297         mpi_request.PortNumber = port;
7298         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7299             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7300
7301         if (r != 0) {
7302                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7303                 return r;
7304         }
7305
7306         pfacts = &ioc->pfacts[port];
7307         memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
7308         pfacts->PortNumber = mpi_reply.PortNumber;
7309         pfacts->VP_ID = mpi_reply.VP_ID;
7310         pfacts->VF_ID = mpi_reply.VF_ID;
7311         pfacts->MaxPostedCmdBuffers =
7312             le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
7313
7314         return 0;
7315 }
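/*
 * Editorial sketch, not part of the driver: port facts are gathered once
 * per port reported by IOC facts; elsewhere in this driver that takes
 * the shape (sketch only):
 *
 *	for (port = 0; port < ioc->facts.NumberOfPorts; port++)
 *		if ((r = _base_get_port_facts(ioc, port)))
 *			return r;
 */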
7316
7317 /**
7318  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
7319  * @ioc: per adapter object
7320  * @timeout: timeout in seconds
7321  *
7322  * Return: 0 for success, non-zero for failure.
7323  */
7324 static int
7325 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
7326 {
7327         u32 ioc_state;
7328         int rc;
7329
7330         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7331
7332         if (ioc->pci_error_recovery) {
7333                 dfailprintk(ioc,
7334                             ioc_info(ioc, "%s: host in pci error recovery\n",
7335                                      __func__));
7336                 return -EFAULT;
7337         }
7338
7339         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7340         dhsprintk(ioc,
7341                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7342                            __func__, ioc_state));
7343
7344         if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
7345             (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7346                 return 0;
7347
7348         if (ioc_state & MPI2_DOORBELL_USED) {
7349                 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
7350                 goto issue_diag_reset;
7351         }
7352
7353         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7354                 mpt3sas_print_fault_code(ioc, ioc_state &
7355                     MPI2_DOORBELL_DATA_MASK);
7356                 goto issue_diag_reset;
7357         } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
7358             MPI2_IOC_STATE_COREDUMP) {
7359                 ioc_info(ioc,
7360                     "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
7361                     __func__, ioc_state);
7362                 return -EFAULT;
7363         }
7364
7365         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
7366         if (ioc_state) {
7367                 dfailprintk(ioc,
7368                             ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7369                                      __func__, ioc_state));
7370                 return -EFAULT;
7371         }
7372
7373  issue_diag_reset:
7374         rc = _base_diag_reset(ioc);
7375         return rc;
7376 }
7377
7378 /**
7379  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7380  * @ioc: per adapter object
7381  *
7382  * Return: 0 for success, non-zero for failure.
7383  */
7384 static int
7385 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
7386 {
7387         Mpi2IOCFactsRequest_t mpi_request;
7388         Mpi2IOCFactsReply_t mpi_reply;
7389         struct mpt3sas_facts *facts;
7390         int mpi_reply_sz, mpi_request_sz, r;
7391
7392         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7393
7394         r = _base_wait_for_iocstate(ioc, 10);
7395         if (r) {
7396                 dfailprintk(ioc,
7397                             ioc_info(ioc, "%s: failed getting to correct state\n",
7398                                      __func__));
7399                 return r;
7400         }
7401         mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
7402         mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
7403         memset(&mpi_request, 0, mpi_request_sz);
7404         mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
7405         r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7406             (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7407
7408         if (r != 0) {
7409                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7410                 return r;
7411         }
7412
7413         facts = &ioc->facts;
7414         memset(facts, 0, sizeof(struct mpt3sas_facts));
7415         facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
7416         facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
7417         facts->VP_ID = mpi_reply.VP_ID;
7418         facts->VF_ID = mpi_reply.VF_ID;
7419         facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
7420         facts->MaxChainDepth = mpi_reply.MaxChainDepth;
7421         facts->WhoInit = mpi_reply.WhoInit;
7422         facts->NumberOfPorts = mpi_reply.NumberOfPorts;
7423         facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
7424         if (ioc->msix_enable && (facts->MaxMSIxVectors <=
7425             MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
7426                 ioc->combined_reply_queue = 0;
7427         facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
7428         facts->MaxReplyDescriptorPostQueueDepth =
7429             le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
7430         facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
7431         facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
7432         if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
7433                 ioc->ir_firmware = 1;
7434         if ((facts->IOCCapabilities &
7435               MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
7436                 ioc->rdpq_array_capable = 1;
7437         if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
7438             && ioc->is_aero_ioc)
7439                 ioc->atomic_desc_capable = 1;
7440         facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
7441         facts->IOCRequestFrameSize =
7442             le16_to_cpu(mpi_reply.IOCRequestFrameSize);
7443         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7444                 facts->IOCMaxChainSegmentSize =
7445                         le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
7446         }
7447         facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
7448         facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
7449         ioc->shost->max_id = -1;
7450         facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
7451         facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
7452         facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
7453         facts->HighPriorityCredit =
7454             le16_to_cpu(mpi_reply.HighPriorityCredit);
7455         facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
7456         facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
7457         facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
7458
7459         /*
7460          * Get the Page Size from IOC Facts. If it's 0, default to 4k.
7461          */
7462         ioc->page_size = 1 << facts->CurrentHostPageSize;
7463         if (ioc->page_size == 1) {
7464                 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7465                 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
7466         }
7467         dinitprintk(ioc,
7468                     ioc_info(ioc, "CurrentHostPageSize(%d)\n",
7469                              facts->CurrentHostPageSize));
7470
7471         dinitprintk(ioc,
7472                     ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
7473                              facts->RequestCredit, facts->MaxChainDepth));
7474         dinitprintk(ioc,
7475                     ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
7476                              facts->IOCRequestFrameSize * 4,
7477                              facts->ReplyFrameSize * 4));
7478         return 0;
7479 }
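/*
 * Editorial note, not part of the driver: CurrentHostPageSize from IOC
 * facts is a power-of-two exponent, so the computation above yields e.g.
 * 1 << 12 == 4096 bytes for a reported value of 12; a reported 0 falls
 * back to MPT3SAS_HOST_PAGE_SIZE_4K as handled above.
 */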
7480
7481 /**
7482  * _base_send_ioc_init - send ioc_init to firmware
7483  * @ioc: per adapter object
7484  *
7485  * Return: 0 for success, non-zero for failure.
7486  */
7487 static int
7488 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7489 {
7490         Mpi2IOCInitRequest_t mpi_request;
7491         Mpi2IOCInitReply_t mpi_reply;
7492         int i, r = 0;
7493         ktime_t current_time;
7494         u16 ioc_status;
7495         u32 reply_post_free_array_sz = 0;
7496
7497         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7498
7499         memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
7500         mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
7501         mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
7502         mpi_request.VF_ID = 0; /* TODO */
7503         mpi_request.VP_ID = 0;
7504         mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7505         mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
7506         mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
7507
7508         if (_base_is_controller_msix_enabled(ioc))
7509                 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7510         mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7511         mpi_request.ReplyDescriptorPostQueueDepth =
7512             cpu_to_le16(ioc->reply_post_queue_depth);
7513         mpi_request.ReplyFreeQueueDepth =
7514             cpu_to_le16(ioc->reply_free_queue_depth);
7515
7516         mpi_request.SenseBufferAddressHigh =
7517             cpu_to_le32((u64)ioc->sense_dma >> 32);
7518         mpi_request.SystemReplyAddressHigh =
7519             cpu_to_le32((u64)ioc->reply_dma >> 32);
7520         mpi_request.SystemRequestFrameBaseAddress =
7521             cpu_to_le64((u64)ioc->request_dma);
7522         mpi_request.ReplyFreeQueueAddress =
7523             cpu_to_le64((u64)ioc->reply_free_dma);
7524
7525         if (ioc->rdpq_array_enable) {
7526                 reply_post_free_array_sz = ioc->reply_queue_count *
7527                     sizeof(Mpi2IOCInitRDPQArrayEntry);
7528                 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7529                 for (i = 0; i < ioc->reply_queue_count; i++)
7530                         ioc->reply_post_free_array[i].RDPQBaseAddress =
7531                             cpu_to_le64(
7532                                 (u64)ioc->reply_post[i].reply_post_free_dma);
7533                 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
7534                 mpi_request.ReplyDescriptorPostQueueAddress =
7535                     cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7536         } else {
7537                 mpi_request.ReplyDescriptorPostQueueAddress =
7538                     cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7539         }
7540
7541         /*
7542          * Set the flag to enable CoreDump state feature in IOC firmware.
7543          */
7544         mpi_request.ConfigurationFlags |=
7545             cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
7546
7547         /* This time stamp specifies the number of milliseconds
7548          * since the epoch (midnight, January 1, 1970 UTC).
7549          */
7550         current_time = ktime_get_real();
7551         mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
7552
7553         if (ioc->logging_level & MPT_DEBUG_INIT) {
7554                 __le32 *mfp;
7555                 int i;
7556
7557                 mfp = (__le32 *)&mpi_request;
7558                 ioc_info(ioc, "\toffset:data\n");
7559                 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
7560                         ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7561                             le32_to_cpu(mfp[i]));
7562         }
7563
7564         r = _base_handshake_req_reply_wait(ioc,
7565             sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7566             sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7567
7568         if (r != 0) {
7569                 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7570                 return r;
7571         }
7572
7573         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7574         if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
7575             mpi_reply.IOCLogInfo) {
7576                 ioc_err(ioc, "%s: failed\n", __func__);
7577                 r = -EIO;
7578         }
7579
7580         /* Reset TimeSync Counter*/
7581         ioc->timestamp_update_count = 0;
7582         return r;
7583 }
7584
7585 /**
7586  * mpt3sas_port_enable_done - command completion routine for port enable
7587  * @ioc: per adapter object
7588  * @smid: system request message index
7589  * @msix_index: MSIX table index supplied by the OS
7590  * @reply: reply message frame(lower 32bit addr)
7591  *
7592  * Return: 1 meaning mf should be freed from _base_interrupt
7593  *          0 means the mf is freed from this function.
7594  */
7595 u8
7596 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7597         u32 reply)
7598 {
7599         MPI2DefaultReply_t *mpi_reply;
7600         u16 ioc_status;
7601
7602         if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7603                 return 1;
7604
7605         mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7606         if (!mpi_reply)
7607                 return 1;
7608
7609         if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7610                 return 1;
7611
7612         ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7613         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7614         ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7615         memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7616         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7617         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7618                 ioc->port_enable_failed = 1;
7619
7620         if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7621                 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7622                 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7623                         mpt3sas_port_enable_complete(ioc);
7624                         return 1;
7625                 } else {
7626                         ioc->start_scan_failed = ioc_status;
7627                         ioc->start_scan = 0;
7628                         return 1;
7629                 }
7630         }
7631         complete(&ioc->port_enable_cmds.done);
7632         return 1;
7633 }
7634
7635 /**
7636  * _base_send_port_enable - send port_enable (discovery) to firmware
7637  * @ioc: per adapter object
7638  *
7639  * Return: 0 for success, non-zero for failure.
7640  */
7641 static int
7642 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7643 {
7644         Mpi2PortEnableRequest_t *mpi_request;
7645         Mpi2PortEnableReply_t *mpi_reply;
7646         int r = 0;
7647         u16 smid;
7648         u16 ioc_status;
7649
7650         ioc_info(ioc, "sending port enable !!\n");
7651
7652         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7653                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7654                 return -EAGAIN;
7655         }
7656
7657         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7658         if (!smid) {
7659                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7660                 return -EAGAIN;
7661         }
7662
7663         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7664         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7665         ioc->port_enable_cmds.smid = smid;
7666         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7667         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7668
7669         init_completion(&ioc->port_enable_cmds.done);
7670         ioc->put_smid_default(ioc, smid);
7671         wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7672         if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7673                 ioc_err(ioc, "%s: timeout\n", __func__);
7674                 _debug_dump_mf(mpi_request,
7675                     sizeof(Mpi2PortEnableRequest_t)/4);
7676                 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7677                         r = -EFAULT;
7678                 else
7679                         r = -ETIME;
7680                 goto out;
7681         }
7682
7683         mpi_reply = ioc->port_enable_cmds.reply;
7684         ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7685         if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7686                 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7687                         __func__, ioc_status);
7688                 r = -EFAULT;
7689                 goto out;
7690         }
7691
7692  out:
7693         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7694         ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7695         return r;
7696 }
7697
7698 /**
7699  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7700  * @ioc: per adapter object
7701  *
7702  * Return: 0 for success, non-zero for failure.
7703  */
7704 int
7705 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7706 {
7707         Mpi2PortEnableRequest_t *mpi_request;
7708         u16 smid;
7709
7710         ioc_info(ioc, "sending port enable !!\n");
7711
7712         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7713                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7714                 return -EAGAIN;
7715         }
7716
7717         smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7718         if (!smid) {
7719                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7720                 return -EAGAIN;
7721         }
7722         ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7723         ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7724         ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7725         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7726         ioc->port_enable_cmds.smid = smid;
7727         memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7728         mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7729
7730         ioc->put_smid_default(ioc, smid);
7731         return 0;
7732 }
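/*
 * Editorial note, not part of the driver: unlike _base_send_port_enable()
 * above, this variant returns as soon as the request is posted.  The
 * MPT3_CMD_COMPLETE_ASYNC flag routes completion through
 * mpt3sas_port_enable_done(), which kicks mpt3sas_port_enable_complete()
 * instead of waking a waiter.
 */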
7733
7734 /**
7735  * _base_determine_wait_on_discovery - disposition
7736  * @ioc: per adapter object
7737  *
7738  * Decide whether to wait on discovery to complete. Used to either
7739  * locate boot device, or report volumes ahead of physical devices.
7740  *
7741  * Return: 1 for wait, 0 for don't wait.
7742  */
7743 static int
7744 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7745 {
7746         /* We wait for discovery to complete if IR firmware is loaded.
7747          * The sas topology events arrive before PD events, so we need time
7748          * to turn on the bit in ioc->pd_handles that indicates a PD.
7749          * Also, it may be required to report volumes ahead of physical
7750          * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
7751          */
7752         if (ioc->ir_firmware)
7753                 return 1;
7754
7755         /* if no Bios, then we don't need to wait */
7756         if (!ioc->bios_pg3.BiosVersion)
7757                 return 0;
7758
7759         /* The BIOS is present, so we drop down here.
7760          *
7761          * If there are any entries in BIOS Page 2, then we wait
7762          * for discovery to complete.
7763          */
7764
7765         /* Current Boot Device */
7766         if ((ioc->bios_pg2.CurrentBootDeviceForm &
7767             MPI2_BIOSPAGE2_FORM_MASK) ==
7768             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7769         /* Request Boot Device */
7770            (ioc->bios_pg2.ReqBootDeviceForm &
7771             MPI2_BIOSPAGE2_FORM_MASK) ==
7772             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7773         /* Alternate Request Boot Device */
7774            (ioc->bios_pg2.ReqAltBootDeviceForm &
7775             MPI2_BIOSPAGE2_FORM_MASK) ==
7776             MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7777                 return 0;
7778
7779         return 1;
7780 }
7781
7782 /**
7783  * _base_unmask_events - turn on notification for this event
7784  * @ioc: per adapter object
7785  * @event: firmware event
7786  *
7787  * The mask is stored in ioc->event_masks.
7788  */
7789 static void
7790 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7791 {
7792         u32 desired_event;
7793
7794         if (event >= 128)
7795                 return;
7796
7797         desired_event = (1 << (event % 32));
7798
7799         if (event < 32)
7800                 ioc->event_masks[0] &= ~desired_event;
7801         else if (event < 64)
7802                 ioc->event_masks[1] &= ~desired_event;
7803         else if (event < 96)
7804                 ioc->event_masks[2] &= ~desired_event;
7805         else if (event < 128)
7806                 ioc->event_masks[3] &= ~desired_event;
7807 }
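/*
 * Editorial sketch, not part of the driver: ioc->event_masks[] is four
 * 32-bit words covering events 0..127, so unmasking event 35, for
 * example, clears bit (35 % 32) == 3 in word 1.  A typical call during
 * attach looks like:
 *
 *	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
 */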
7808
7809 /**
7810  * _base_event_notification - send event notification
7811  * @ioc: per adapter object
7812  *
7813  * Return: 0 for success, non-zero for failure.
7814  */
7815 static int
7816 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7817 {
7818         Mpi2EventNotificationRequest_t *mpi_request;
7819         u16 smid;
7820         int r = 0;
7821         int i, issue_diag_reset = 0;
7822
7823         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7824
7825         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7826                 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7827                 return -EAGAIN;
7828         }
7829
7830         smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7831         if (!smid) {
7832                 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7833                 return -EAGAIN;
7834         }
7835         ioc->base_cmds.status = MPT3_CMD_PENDING;
7836         mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7837         ioc->base_cmds.smid = smid;
7838         memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
7839         mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
7840         mpi_request->VF_ID = 0; /* TODO */
7841         mpi_request->VP_ID = 0;
7842         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7843                 mpi_request->EventMasks[i] =
7844                     cpu_to_le32(ioc->event_masks[i]);
7845         init_completion(&ioc->base_cmds.done);
7846         ioc->put_smid_default(ioc, smid);
7847         wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7848         if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7849                 ioc_err(ioc, "%s: timeout\n", __func__);
7850                 _debug_dump_mf(mpi_request,
7851                     sizeof(Mpi2EventNotificationRequest_t)/4);
7852                 if (ioc->base_cmds.status & MPT3_CMD_RESET)
7853                         r = -EFAULT;
7854                 else
7855                         issue_diag_reset = 1;
7856
7857         } else
7858                 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7859         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7860
7861         if (issue_diag_reset) {
7862                 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
7863                         return -EFAULT;
7864                 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
7865                         return -EFAULT;
7866                 r = -EAGAIN;
7867         }
7868         return r;
7869 }
7870
7871 /**
7872  * mpt3sas_base_validate_event_type - validating event types
7873  * @ioc: per adapter object
7874  * @event_type: firmware event
7875  *
7876  * This will turn on firmware event notification when an application
7877  * asks for that event. We don't mask events that are already enabled.
7878  */
7879 void
7880 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7881 {
7882         int i, j;
7883         u32 event_mask, desired_event;
7884         u8 send_update_to_fw;
7885
7886         for (i = 0, send_update_to_fw = 0; i <
7887             MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7888                 event_mask = ~event_type[i];
7889                 desired_event = 1;
7890                 for (j = 0; j < 32; j++) {
7891                         if (!(event_mask & desired_event) &&
7892                             (ioc->event_masks[i] & desired_event)) {
7893                                 ioc->event_masks[i] &= ~desired_event;
7894                                 send_update_to_fw = 1;
7895                         }
7896                         desired_event = (desired_event << 1);
7897                 }
7898         }
7899
7900         if (!send_update_to_fw)
7901                 return;
7902
7903         mutex_lock(&ioc->base_cmds.mutex);
7904         _base_event_notification(ioc);
7905         mutex_unlock(&ioc->base_cmds.mutex);
7906 }
7907
7908 /**
7909  * _base_diag_reset - the "big hammer" start of day reset
7910  * @ioc: per adapter object
7911  *
7912  * Return: 0 for success, non-zero for failure.
7913  */
7914 static int
7915 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
7916 {
7917         u32 host_diagnostic;
7918         u32 ioc_state;
7919         u32 count;
7920         u32 hcb_size;
7921
7922         ioc_info(ioc, "sending diag reset !!\n");
7923
7924         pci_cfg_access_lock(ioc->pdev);
7925
7926         drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
7927
7928         count = 0;
7929         do {
7930                 /* Write magic sequence to WriteSequence register
7931                  * Loop until in diagnostic mode
7932                  */
7933                 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7934                 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7935                 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7936                 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7937                 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7938                 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7939                 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7940                 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7941
7942                 /* wait 100 msec */
7943                 msleep(100);
7944
7945                 if (count++ > 20) {
7946                         ioc_info(ioc,
7947                             "Stop writing magic sequence after 20 retries\n");
7948                         _base_dump_reg_set(ioc);
7949                         goto out;
7950                 }
7951
7952                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7953                 drsprintk(ioc,
7954                           ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7955                                    count, host_diagnostic));
7956
7957         } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
7958
7959         hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
7960
7961         drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
7962         writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
7963              &ioc->chip->HostDiagnostic);
7964
7965         /* This delay allows the chip PCIe hardware time to finish reset tasks */
7966         msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
7967
7968         /* Approximately 300 seconds max wait */
7969         for (count = 0; count < (300000000 /
7970                 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
7971
7972                 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
7973
7974                 if (host_diagnostic == 0xFFFFFFFF) {
7975                         ioc_info(ioc,
7976                             "Invalid host diagnostic register value\n");
7977                         _base_dump_reg_set(ioc);
7978                         goto out;
7979                 }
7980                 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
7981                         break;
7982
7983                 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
7984         }
7985
7986         if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
7987
7988                 drsprintk(ioc,
7989                           ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
7990                 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
7991                 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
7992                 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
7993
7994                 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
7995                 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
7996                     &ioc->chip->HCBSize);
7997         }
7998
7999         drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
8000         writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
8001             &ioc->chip->HostDiagnostic);
8002
8003         drsprintk(ioc,
8004                   ioc_info(ioc, "disable writes to the diagnostic register\n"));
8005         writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
8006
8007         drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
8008         ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
8009         if (ioc_state) {
8010                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8011                         __func__, ioc_state);
8012                 _base_dump_reg_set(ioc);
8013                 goto out;
8014         }
8015
8016         pci_cfg_access_unlock(ioc->pdev);
8017         ioc_info(ioc, "diag reset: SUCCESS\n");
8018         return 0;
8019
8020  out:
8021         pci_cfg_access_unlock(ioc->pdev);
8022         ioc_err(ioc, "diag reset: FAILED\n");
8023         return -EFAULT;
8024 }
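/*
 * Editorial note, not part of the driver: the post-reset poll above
 * budgets roughly 300 seconds, expressed as 300000000 us divided by
 * MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC iterations, sleeping
 * that same delay between reads of HostDiagnostic.
 */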
8025
8026 /**
8027  * mpt3sas_base_make_ioc_ready - put controller in READY state
8028  * @ioc: per adapter object
8029  * @type: FORCE_BIG_HAMMER or SOFT_RESET
8030  *
8031  * Return: 0 for success, non-zero for failure.
8032  */
8033 int
8034 mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
8035 {
8036         u32 ioc_state;
8037         int rc;
8038         int count;
8039
8040         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8041
8042         if (ioc->pci_error_recovery)
8043                 return 0;
8044
8045         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8046         dhsprintk(ioc,
8047                   ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
8048                            __func__, ioc_state));
8049
8050         /* if in RESET state, it should move to READY state shortly */
8051         count = 0;
8052         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
8053                 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
8054                     MPI2_IOC_STATE_READY) {
8055                         if (count++ == 10) {
8056                                 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8057                                         __func__, ioc_state);
8058                                 return -EFAULT;
8059                         }
8060                         ssleep(1);
8061                         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8062                 }
8063         }
8064
8065         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
8066                 return 0;
8067
8068         if (ioc_state & MPI2_DOORBELL_USED) {
8069                 ioc_info(ioc, "unexpected doorbell active!\n");
8070                 goto issue_diag_reset;
8071         }
8072
8073         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
8074                 mpt3sas_print_fault_code(ioc, ioc_state &
8075                     MPI2_DOORBELL_DATA_MASK);
8076                 goto issue_diag_reset;
8077         }
8078
8079         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
8080                 /*
8081                  * If a host reset is invoked while the watchdog thread is
8082                  * waiting for the IOC state to change to FAULT, the driver
8083                  * must wait here for the COREDUMP state to clear; otherwise
8084                  * a reset is issued to the FW, which moves the IOC to RESET
8085                  * without copying the FW logs to the coredump region.
8086                  */
8087                 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
8088                         mpt3sas_print_coredump_info(ioc, ioc_state &
8089                             MPI2_DOORBELL_DATA_MASK);
8090                         mpt3sas_base_wait_for_coredump_completion(ioc,
8091                             __func__);
8092                 }
8093                 goto issue_diag_reset;
8094         }
8095
8096         if (type == FORCE_BIG_HAMMER)
8097                 goto issue_diag_reset;
8098
8099         if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) {
8100                 if (!(_base_send_ioc_reset(ioc,
8101                     MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
8102                         return 0;
8103         }
8104
8105  issue_diag_reset:
8106         rc = _base_diag_reset(ioc);
8107         return rc;
8108 }
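/*
 * Editorial note, not part of the driver: the function above implements a
 * reset ladder -- a RESET state is given up to ~10 s to reach READY; READY
 * returns immediately; an active doorbell, FAULT, or COREDUMP (after any
 * coredump wait) escalates to a diag reset; otherwise an OPERATIONAL IOC
 * is first offered the gentler message unit reset before the diag reset
 * is used as a last resort.
 */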
8109
8110 /**
8111  * _base_make_ioc_operational - put controller in OPERATIONAL state
8112  * @ioc: per adapter object
8113  *
8114  * Return: 0 for success, non-zero for failure.
8115  */
8116 static int
8117 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
8118 {
8119         int r, i, index, rc;
8120         unsigned long   flags;
8121         u32 reply_address;
8122         u16 smid;
8123         struct _tr_list *delayed_tr, *delayed_tr_next;
8124         struct _sc_list *delayed_sc, *delayed_sc_next;
8125         struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
8126         u8 hide_flag;
8127         struct adapter_reply_queue *reply_q;
8128         Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
8129
8130         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8131
8132         /* clean the delayed target reset list */
8133         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
8134             &ioc->delayed_tr_list, list) {
8135                 list_del(&delayed_tr->list);
8136                 kfree(delayed_tr);
8137         }
8138
8139
8140         list_for_each_entry_safe(delayed_tr, delayed_tr_next,
8141             &ioc->delayed_tr_volume_list, list) {
8142                 list_del(&delayed_tr->list);
8143                 kfree(delayed_tr);
8144         }
8145
8146         list_for_each_entry_safe(delayed_sc, delayed_sc_next,
8147             &ioc->delayed_sc_list, list) {
8148                 list_del(&delayed_sc->list);
8149                 kfree(delayed_sc);
8150         }
8151
8152         list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
8153             &ioc->delayed_event_ack_list, list) {
8154                 list_del(&delayed_event_ack->list);
8155                 kfree(delayed_event_ack);
8156         }
8157
8158         spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8159
8160         /* hi-priority queue */
8161         INIT_LIST_HEAD(&ioc->hpr_free_list);
8162         smid = ioc->hi_priority_smid;
8163         for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
8164                 ioc->hpr_lookup[i].cb_idx = 0xFF;
8165                 ioc->hpr_lookup[i].smid = smid;
8166                 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
8167                     &ioc->hpr_free_list);
8168         }
8169
8170         /* internal queue */
8171         INIT_LIST_HEAD(&ioc->internal_free_list);
8172         smid = ioc->internal_smid;
8173         for (i = 0; i < ioc->internal_depth; i++, smid++) {
8174                 ioc->internal_lookup[i].cb_idx = 0xFF;
8175                 ioc->internal_lookup[i].smid = smid;
8176                 list_add_tail(&ioc->internal_lookup[i].tracker_list,
8177                     &ioc->internal_free_list);
8178         }
8179
8180         spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8181
8182         /* initialize Reply Free Queue */
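        /*
         * Each reply free entry holds the 32-bit DMA address of one reply
         * frame handed back to the firmware; mCPU endpoints additionally
         * mirror the addresses into system memory.
         */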
8183         for (i = 0, reply_address = (u32)ioc->reply_dma ;
8184             i < ioc->reply_free_queue_depth ; i++, reply_address +=
8185             ioc->reply_sz) {
8186                 ioc->reply_free[i] = cpu_to_le32(reply_address);
8187                 if (ioc->is_mcpu_endpoint)
8188                         _base_clone_reply_to_sys_mem(ioc,
8189                                         reply_address, i);
8190         }
8191
8192         /* initialize reply queues */
8193         if (ioc->is_driver_loading)
8194                 _base_assign_reply_queues(ioc);
8195
8196         /* initialize Reply Post Free Queue */
8197         index = 0;
8198         reply_post_free_contig = ioc->reply_post[0].reply_post_free;
8199         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8200                 /*
8201                  * If RDPQ is enabled, switch to the next allocation.
8202                  * Otherwise advance within the contiguous region.
8203                  */
8204                 if (ioc->rdpq_array_enable) {
8205                         reply_q->reply_post_free =
8206                                 ioc->reply_post[index++].reply_post_free;
8207                 } else {
8208                         reply_q->reply_post_free = reply_post_free_contig;
8209                         reply_post_free_contig += ioc->reply_post_queue_depth;
8210                 }
8211
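                /*
                 * Mark every reply post descriptor as unused (all ones);
                 * a descriptor is recognized as a new reply only once the
                 * firmware overwrites this pattern.
                 */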
8212                 reply_q->reply_post_host_index = 0;
8213                 for (i = 0; i < ioc->reply_post_queue_depth; i++)
8214                         reply_q->reply_post_free[i].Words =
8215                             cpu_to_le64(ULLONG_MAX);
8216                 if (!_base_is_controller_msix_enabled(ioc))
8217                         goto skip_init_reply_post_free_queue;
8218         }
8219  skip_init_reply_post_free_queue:
8220
8221         r = _base_send_ioc_init(ioc);
8222         if (r) {
8223                 /*
8224                  * No need to check the IOC state for fault and issue a
8225                  * diag reset during host reset; this check is needed
8226                  * only at driver load time.
8227                  */
8228                 if (!ioc->is_driver_loading)
8229                         return r;
8230
8231                 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8232                 if (rc || (_base_send_ioc_init(ioc)))
8233                         return r;
8234         }
8235
8236         /* initialize reply free host index */
8237         ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
8238         writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
8239
8240         /* initialize reply post host index */
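        /*
         * With combined reply queues, each host index register serves a
         * group of eight MSI-X vectors: the register is selected by
         * msix_index / 8 and the low three bits select the queue within
         * the group.
         */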
8241         list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8242                 if (ioc->combined_reply_queue)
8243                         writel((reply_q->msix_index & 7)<<
8244                            MPI2_RPHI_MSIX_INDEX_SHIFT,
8245                            ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
8246                 else
8247                         writel(reply_q->msix_index <<
8248                                 MPI2_RPHI_MSIX_INDEX_SHIFT,
8249                                 &ioc->chip->ReplyPostHostIndex);
8250
8251                 if (!_base_is_controller_msix_enabled(ioc))
8252                         goto skip_init_reply_post_host_index;
8253         }
8254
8255  skip_init_reply_post_host_index:
8256
8257         mpt3sas_base_unmask_interrupts(ioc);
8258
8259         if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8260                 r = _base_display_fwpkg_version(ioc);
8261                 if (r)
8262                         return r;
8263         }
8264
8265         r = _base_static_config_pages(ioc);
8266         if (r)
8267                 return r;
8268
8269         r = _base_event_notification(ioc);
8270         if (r)
8271                 return r;
8272
8273         if (!ioc->shost_recovery) {
8274
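                /*
                 * WarpDrive: honor the hide-SSDS setting from
                 * Manufacturing Page 10 only when the OEM identifier
                 * matches (0x80).
                 */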
8275                 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
8276                     == 0x80) {
8277                         hide_flag = (u8) (
8278                             le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
8279                             MFG_PAGE10_HIDE_SSDS_MASK);
8280                         if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
8281                                 ioc->mfg_pg10_hide_flag = hide_flag;
8282                 }
8283
8284                 ioc->wait_for_discovery_to_complete =
8285                     _base_determine_wait_on_discovery(ioc);
8286
8287                 return r; /* scan_start and scan_finished support */
8288         }
8289
8290         r = _base_send_port_enable(ioc);
8291         if (r)
8292                 return r;
8293
8294         return r;
8295 }
8296
8297 /**
8298  * mpt3sas_base_free_resources - free controller resources
8299  * @ioc: per adapter object
8300  */
8301 void
8302 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
8303 {
8304         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8305
8306         /* synchronize freeing of resources with the pci_access_mutex lock */
8307         mutex_lock(&ioc->pci_access_mutex);
8308         if (ioc->chip_phys && ioc->chip) {
8309                 mpt3sas_base_mask_interrupts(ioc);
8310                 ioc->shost_recovery = 1;
8311                 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8312                 ioc->shost_recovery = 0;
8313         }
8314
8315         mpt3sas_base_unmap_resources(ioc);
8316         mutex_unlock(&ioc->pci_access_mutex);
8317         return;
8318 }
8319
8320 /**
8321  * mpt3sas_base_attach - attach controller instance
8322  * @ioc: per adapter object
8323  *
8324  * Return: 0 for success, non-zero for failure.
8325  */
8326 int
8327 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
8328 {
8329         int r, i, rc;
8330         int cpu_id, last_cpu_id = 0;
8331
8332         dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8333
8334         /* setup cpu_msix_table */
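        /*
         * The table maps each CPU id to an MSI-X index; it is sized to
         * the highest online CPU id + 1 (rather than num_online_cpus())
         * because CPU ids can be sparse.
         */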
8335         ioc->cpu_count = num_online_cpus();
8336         for_each_online_cpu(cpu_id)
8337                 last_cpu_id = cpu_id;
8338         ioc->cpu_msix_table_sz = last_cpu_id + 1;
8339         ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
8340         ioc->reply_queue_count = 1;
8341         if (!ioc->cpu_msix_table) {
8342                 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
8343                 r = -ENOMEM;
8344                 goto out_free_resources;
8345         }
8346
8347         if (ioc->is_warpdrive) {
8348                 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
8349                     sizeof(resource_size_t *), GFP_KERNEL);
8350                 if (!ioc->reply_post_host_index) {
8351                         ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
8352                         r = -ENOMEM;
8353                         goto out_free_resources;
8354                 }
8355         }
8356
8357         ioc->smp_affinity_enable = smp_affinity_enable;
8358
8359         ioc->rdpq_array_enable_assigned = 0;
8360         ioc->use_32bit_dma = false;
8361         ioc->dma_mask = 64;
8362         if (ioc->is_aero_ioc)
8363                 ioc->base_readl = &_base_readl_aero;
8364         else
8365                 ioc->base_readl = &_base_readl;
8366         r = mpt3sas_base_map_resources(ioc);
8367         if (r)
8368                 goto out_free_resources;
8369
8370         pci_set_drvdata(ioc->pdev, ioc->shost);
8371         r = _base_get_ioc_facts(ioc);
8372         if (r) {
8373                 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8374                 if (rc || (_base_get_ioc_facts(ioc)))
8375                         goto out_free_resources;
8376         }
8377
8378         switch (ioc->hba_mpi_version_belonged) {
8379         case MPI2_VERSION:
8380                 ioc->build_sg_scmd = &_base_build_sg_scmd;
8381                 ioc->build_sg = &_base_build_sg;
8382                 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
8383                 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8384                 break;
8385         case MPI25_VERSION:
8386         case MPI26_VERSION:
8387                 /*
8388                  * In SAS3.0,
8389                  * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
8390                  * Target Status - all require the IEEE formatted scatter gather
8391                  * elements.
8392                  */
8393                 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
8394                 ioc->build_sg = &_base_build_sg_ieee;
8395                 ioc->build_nvme_prp = &_base_build_nvme_prp;
8396                 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
8397                 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
8398                 if (ioc->high_iops_queues)
8399                         ioc->get_msix_index_for_smlio =
8400                                         &_base_get_high_iops_msix_index;
8401                 else
8402                         ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8403                 break;
8404         }
8405         if (ioc->atomic_desc_capable) {
8406                 ioc->put_smid_default = &_base_put_smid_default_atomic;
8407                 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
8408                 ioc->put_smid_fast_path =
8409                                 &_base_put_smid_fast_path_atomic;
8410                 ioc->put_smid_hi_priority =
8411                                 &_base_put_smid_hi_priority_atomic;
8412         } else {
8413                 ioc->put_smid_default = &_base_put_smid_default;
8414                 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8415                 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8416                 if (ioc->is_mcpu_endpoint)
8417                         ioc->put_smid_scsi_io =
8418                                 &_base_put_smid_mpi_ep_scsi_io;
8419                 else
8420                         ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8421         }
8422         /*
8423          * These function pointers are for requests that don't require
8424          * IEEE scatter gather elements.
8425          *
8426          * For example, Configuration Pages and SAS IOUNIT Control don't.
8427          */
8428         ioc->build_sg_mpi = &_base_build_sg;
8429         ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8430
8431         r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8432         if (r)
8433                 goto out_free_resources;
8434
8435         ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8436             sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
8437         if (!ioc->pfacts) {
8438                 r = -ENOMEM;
8439                 goto out_free_resources;
8440         }
8441
8442         for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8443                 r = _base_get_port_facts(ioc, i);
8444                 if (r) {
8445                         rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8446                         if (rc || (_base_get_port_facts(ioc, i)))
8447                                 goto out_free_resources;
8448                 }
8449         }
8450
8451         r = _base_allocate_memory_pools(ioc);
8452         if (r)
8453                 goto out_free_resources;
8454
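        /*
         * thresh_hold is the per-interrupt reply budget: once an MSI-X
         * vector processes this many replies in one pass, further
         * processing is deferred to irq polling. It defaults to a quarter
         * of the HBA queue depth.
         */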
8455         if (irqpoll_weight > 0)
8456                 ioc->thresh_hold = irqpoll_weight;
8457         else
8458                 ioc->thresh_hold = ioc->hba_queue_depth/4;
8459
8460         _base_init_irqpolls(ioc);
8461         init_waitqueue_head(&ioc->reset_wq);
8462
8463         /* allocate the pd handle bitmask; one bit per possible device handle */
8464         ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8465         if (ioc->facts.MaxDevHandle % 8)
8466                 ioc->pd_handles_sz++;
8467         ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8468             GFP_KERNEL);
8469         if (!ioc->pd_handles) {
8470                 r = -ENOMEM;
8471                 goto out_free_resources;
8472         }
8473         ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8474             GFP_KERNEL);
8475         if (!ioc->blocking_handles) {
8476                 r = -ENOMEM;
8477                 goto out_free_resources;
8478         }
8479
8480         /* allocate memory for pending OS device add list */
8481         ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8482         if (ioc->facts.MaxDevHandle % 8)
8483                 ioc->pend_os_device_add_sz++;
8484         ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8485             GFP_KERNEL);
8486         if (!ioc->pend_os_device_add) {
8487                 r = -ENOMEM;
8488                 goto out_free_resources;
8489         }
8490
8491         ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8492         ioc->device_remove_in_progress =
8493                 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8494         if (!ioc->device_remove_in_progress) {
8495                 r = -ENOMEM;
8496                 goto out_free_resources;
8497         }
8498
8499         ioc->fwfault_debug = mpt3sas_fwfault_debug;
8500
8501         /* base internal command bits */
8502         mutex_init(&ioc->base_cmds.mutex);
8503         ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8504         ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8505
8506         /* port_enable command bits */
8507         ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8508         ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8509
8510         /* transport internal command bits */
8511         ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8512         ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8513         mutex_init(&ioc->transport_cmds.mutex);
8514
8515         /* scsih internal command bits */
8516         ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8517         ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8518         mutex_init(&ioc->scsih_cmds.mutex);
8519
8520         /* task management internal command bits */
8521         ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8522         ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8523         mutex_init(&ioc->tm_cmds.mutex);
8524
8525         /* config page internal command bits */
8526         ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8527         ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8528         mutex_init(&ioc->config_cmds.mutex);
8529
8530         /* ctl module internal command bits */
8531         ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8532         ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8533         ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8534         mutex_init(&ioc->ctl_cmds.mutex);
8535
8536         if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8537             !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8538             !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8539             !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8540                 r = -ENOMEM;
8541                 goto out_free_resources;
8542         }
8543
8544         for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
8545                 ioc->event_masks[i] = -1;
8546
8547         /* here we enable the events we care about */
8548         _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8549         _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8550         _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8551         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8552         _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8553         _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8554         _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8555         _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8556         _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8557         _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8558         _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8559         _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8560         _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8561         if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8562                 if (ioc->is_gen35_ioc) {
8563                         _base_unmask_events(ioc,
8564                                 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
8565                         _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8566                         _base_unmask_events(ioc,
8567                                 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
8568                 }
8569         }
8570         r = _base_make_ioc_operational(ioc);
8571         if (r == -EAGAIN) {
8572                 r = _base_make_ioc_operational(ioc);
8573                 if (r)
8574                         goto out_free_resources;
8575         }
8576
8577         /*
8578          * Save the current IOCFacts in prev_fw_facts; they are used to
8579          * detect changes during an online firmware upgrade.
8580          */
8581         memcpy(&ioc->prev_fw_facts, &ioc->facts,
8582             sizeof(struct mpt3sas_facts));
8583
8584         ioc->non_operational_loop = 0;
8585         ioc->ioc_coredump_loop = 0;
8586         ioc->got_task_abort_from_ioctl = 0;
8587         return 0;
8588
8589  out_free_resources:
8590
8591         ioc->remove_host = 1;
8592
8593         mpt3sas_base_free_resources(ioc);
8594         _base_release_memory_pools(ioc);
8595         pci_set_drvdata(ioc->pdev, NULL);
8596         kfree(ioc->cpu_msix_table);
8597         if (ioc->is_warpdrive)
8598                 kfree(ioc->reply_post_host_index);
8599         kfree(ioc->pd_handles);
8600         kfree(ioc->blocking_handles);
8601         kfree(ioc->device_remove_in_progress);
8602         kfree(ioc->pend_os_device_add);
8603         kfree(ioc->tm_cmds.reply);
8604         kfree(ioc->transport_cmds.reply);
8605         kfree(ioc->scsih_cmds.reply);
8606         kfree(ioc->config_cmds.reply);
8607         kfree(ioc->base_cmds.reply);
8608         kfree(ioc->port_enable_cmds.reply);
8609         kfree(ioc->ctl_cmds.reply);
8610         kfree(ioc->ctl_cmds.sense);
8611         kfree(ioc->pfacts);
8612         ioc->ctl_cmds.reply = NULL;
8613         ioc->base_cmds.reply = NULL;
8614         ioc->tm_cmds.reply = NULL;
8615         ioc->scsih_cmds.reply = NULL;
8616         ioc->transport_cmds.reply = NULL;
8617         ioc->config_cmds.reply = NULL;
8618         ioc->pfacts = NULL;
8619         return r;
8620 }
8621
8622
8623 /**
8624  * mpt3sas_base_detach - remove controller instance
8625  * @ioc: per adapter object
8626  */
8627 void
8628 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8629 {
8630         dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8631
8632         mpt3sas_base_stop_watchdog(ioc);
8633         mpt3sas_base_free_resources(ioc);
8634         _base_release_memory_pools(ioc);
8635         mpt3sas_free_enclosure_list(ioc);
8636         pci_set_drvdata(ioc->pdev, NULL);
8637         kfree(ioc->cpu_msix_table);
8638         if (ioc->is_warpdrive)
8639                 kfree(ioc->reply_post_host_index);
8640         kfree(ioc->pd_handles);
8641         kfree(ioc->blocking_handles);
8642         kfree(ioc->device_remove_in_progress);
8643         kfree(ioc->pend_os_device_add);
8644         kfree(ioc->pfacts);
8645         kfree(ioc->ctl_cmds.reply);
8646         kfree(ioc->ctl_cmds.sense);
8647         kfree(ioc->base_cmds.reply);
8648         kfree(ioc->port_enable_cmds.reply);
8649         kfree(ioc->tm_cmds.reply);
8650         kfree(ioc->transport_cmds.reply);
8651         kfree(ioc->scsih_cmds.reply);
8652         kfree(ioc->config_cmds.reply);
8653 }
8654
8655 /**
8656  * _base_pre_reset_handler - pre reset handler
8657  * @ioc: per adapter object
8658  */
8659 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
8660 {
8661         mpt3sas_scsih_pre_reset_handler(ioc);
8662         mpt3sas_ctl_pre_reset_handler(ioc);
8663         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
8664 }
8665
8666 /**
8667  * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8668  * @ioc: per adapter object
8669  */
8670 static void
8671 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
8672 {
8673         dtmprintk(ioc,
8674             ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
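        /*
         * For each internal command still pending, mark it as interrupted
         * by the reset, release its SMID, and wake up the waiter (or fail
         * the initial port enable if the driver is still loading).
         */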
8675         if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
8676                 ioc->transport_cmds.status |= MPT3_CMD_RESET;
8677                 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
8678                 complete(&ioc->transport_cmds.done);
8679         }
8680         if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
8681                 ioc->base_cmds.status |= MPT3_CMD_RESET;
8682                 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
8683                 complete(&ioc->base_cmds.done);
8684         }
8685         if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
8686                 ioc->port_enable_failed = 1;
8687                 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
8688                 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
8689                 if (ioc->is_driver_loading) {
8690                         ioc->start_scan_failed =
8691                                 MPI2_IOCSTATUS_INTERNAL_ERROR;
8692                         ioc->start_scan = 0;
8693                 } else {
8694                         complete(&ioc->port_enable_cmds.done);
8695                 }
8696         }
8697         if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
8698                 ioc->config_cmds.status |= MPT3_CMD_RESET;
8699                 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
8700                 ioc->config_cmds.smid = USHRT_MAX;
8701                 complete(&ioc->config_cmds.done);
8702         }
8703 }
8704
8705 /**
8706  * _base_clear_outstanding_commands - clear all outstanding commands
8707  * @ioc: per adapter object
8708  */
8709 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
8710 {
8711         mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
8712         mpt3sas_ctl_clear_outstanding_ioctls(ioc);
8713         _base_clear_outstanding_mpt_commands(ioc);
8714 }
8715
8716 /**
8717  * _base_reset_done_handler - reset done handler
8718  * @ioc: per adapter object
8719  */
8720 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
8721 {
8722         mpt3sas_scsih_reset_done_handler(ioc);
8723         mpt3sas_ctl_reset_done_handler(ioc);
8724         dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
8725 }
8726
8727 /**
8728  * mpt3sas_wait_for_commands_to_complete - wait for pending commands
8729  * @ioc: Pointer to MPT_ADAPTER structure
8730  *
8731  * Wait up to 10 seconds for all pending commands to complete
8732  * prior to putting the controller in reset.
8733  */
8734 void
8735 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8736 {
8737         u32 ioc_state;
8738
8739         ioc->pending_io_count = 0;
8740
8741         ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8742         if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
8743                 return;
8744
8745         /* pending command count */
8746         ioc->pending_io_count = scsi_host_busy(ioc->shost);
8747
8748         if (!ioc->pending_io_count)
8749                 return;
8750
8751         /* wait for pending commands to complete */
8752         wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8753 }
8754
8755 /**
8756  * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
8757  *     attributes during online firmware upgrade and update the corresponding
8758  *     IOC variables accordingly.
8759  *
8760  * @ioc: Pointer to MPT_ADAPTER structure
 *
 * Return: 0 for success, -ENOMEM on allocation failure.
8761  */
8762 static int
8763 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
8764 {
8765         u16 pd_handles_sz;
8766         void *pd_handles = NULL, *blocking_handles = NULL;
8767         void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
8768         struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
8769
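        /*
         * The per-handle bitmaps only ever grow: when the new firmware
         * reports a larger MaxDevHandle, reallocate each bitmap and zero
         * just the newly added bytes so existing state is preserved.
         */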
8770         if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
8771                 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8772                 if (ioc->facts.MaxDevHandle % 8)
8773                         pd_handles_sz++;
8774
8775                 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
8776                     GFP_KERNEL);
8777                 if (!pd_handles) {
8778                         ioc_info(ioc,
8779                             "Unable to allocate the memory for pd_handles of sz: %d\n",
8780                             pd_handles_sz);
8781                         return -ENOMEM;
8782                 }
8783                 memset(pd_handles + ioc->pd_handles_sz, 0,
8784                     (pd_handles_sz - ioc->pd_handles_sz));
8785                 ioc->pd_handles = pd_handles;
8786
8787                 blocking_handles = krealloc(ioc->blocking_handles,
8788                     pd_handles_sz, GFP_KERNEL);
8789                 if (!blocking_handles) {
8790                         ioc_info(ioc,
8791                             "Unable to allocate the memory for blocking_handles of sz: %d\n",
8792                             pd_handles_sz);
8794                         return -ENOMEM;
8795                 }
8796                 memset(blocking_handles + ioc->pd_handles_sz, 0,
8797                     (pd_handles_sz - ioc->pd_handles_sz));
8798                 ioc->blocking_handles = blocking_handles;
8799                 ioc->pd_handles_sz = pd_handles_sz;
8800
8801                 pend_os_device_add = krealloc(ioc->pend_os_device_add,
8802                     pd_handles_sz, GFP_KERNEL);
8803                 if (!pend_os_device_add) {
8804                         ioc_info(ioc,
8805                             "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
8806                             pd_handles_sz);
8807                         return -ENOMEM;
8808                 }
8809                 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
8810                     (pd_handles_sz - ioc->pend_os_device_add_sz));
8811                 ioc->pend_os_device_add = pend_os_device_add;
8812                 ioc->pend_os_device_add_sz = pd_handles_sz;
8813
8814                 device_remove_in_progress = krealloc(
8815                     ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
8816                 if (!device_remove_in_progress) {
8817                         ioc_info(ioc,
8818                             "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
8819                             pd_handles_sz);
8821                         return -ENOMEM;
8822                 }
8823                 memset(device_remove_in_progress +
8824                     ioc->device_remove_in_progress_sz, 0,
8825                     (pd_handles_sz - ioc->device_remove_in_progress_sz));
8826                 ioc->device_remove_in_progress = device_remove_in_progress;
8827                 ioc->device_remove_in_progress_sz = pd_handles_sz;
8828         }
8829
8830         memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
8831         return 0;
8832 }
8833
8834 /**
8835  * mpt3sas_base_hard_reset_handler - reset controller
8836  * @ioc: Pointer to MPT_ADAPTER structure
8837  * @type: FORCE_BIG_HAMMER or SOFT_RESET
8838  *
8839  * Return: 0 for success, non-zero for failure.
8840  */
8841 int
8842 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
8843         enum reset_type type)
8844 {
8845         int r;
8846         unsigned long flags;
8847         u32 ioc_state;
8848         u8 is_fault = 0, is_trigger = 0;
8849
8850         dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
8851
8852         if (ioc->pci_error_recovery) {
8853                 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
8854                 r = 0;
8855                 goto out_unlocked;
8856         }
8857
8858         if (mpt3sas_fwfault_debug)
8859                 mpt3sas_halt_firmware(ioc);
8860
8861         /* wait for an active reset in progress to complete */
8862         mutex_lock(&ioc->reset_in_progress_mutex);
8863
8864         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8865         ioc->shost_recovery = 1;
8866         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8867
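        /*
         * If a trace diag buffer is registered and not yet released,
         * remember whether the IOC was in the FAULT or COREDUMP state so
         * the matching trigger can be fired once the reset completes.
         */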
8868         if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8869             MPT3_DIAG_BUFFER_IS_REGISTERED) &&
8870             (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8871             MPT3_DIAG_BUFFER_IS_RELEASED))) {
8872                 is_trigger = 1;
8873                 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8874                 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
8875                     (ioc_state & MPI2_IOC_STATE_MASK) ==
8876                     MPI2_IOC_STATE_COREDUMP) {
8877                         is_fault = 1;
8878                         ioc->htb_rel.trigger_info_dwords[1] =
8879                             (ioc_state & MPI2_DOORBELL_DATA_MASK);
8880                 }
8881         }
8882         _base_pre_reset_handler(ioc);
8883         mpt3sas_wait_for_commands_to_complete(ioc);
8884         mpt3sas_base_mask_interrupts(ioc);
8885         mpt3sas_base_pause_mq_polling(ioc);
8886         r = mpt3sas_base_make_ioc_ready(ioc, type);
8887         if (r)
8888                 goto out;
8889         _base_clear_outstanding_commands(ioc);
8890
8891         /* If this hard reset is called while port enable is active, then
8892          * there is no reason to call make_ioc_operational
8893          */
8894         if (ioc->is_driver_loading && ioc->port_enable_failed) {
8895                 ioc->remove_host = 1;
8896                 r = -EFAULT;
8897                 goto out;
8898         }
8899         r = _base_get_ioc_facts(ioc);
8900         if (r)
8901                 goto out;
8902
8903         r = _base_check_ioc_facts_changes(ioc);
8904         if (r) {
8905                 ioc_info(ioc,
8906                     "Some of the parameters changed in the new firmware image; a system reboot is required\n");
8908                 goto out;
8909         }
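        /*
         * The reply post layout cannot be switched between RDPQ and
         * non-RDPQ modes at reset time; if the freshly flashed firmware
         * dropped the RDPQ support the driver was initialized with, the
         * only safe option is to panic and require a reboot.
         */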
8910         if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
8911                 panic("%s: Issue occurred with flashing controller firmware. "
8912                       "Please reboot the system and ensure that the correct "
8913                       "firmware version is running\n", ioc->name);
8914
8915         r = _base_make_ioc_operational(ioc);
8916         if (!r)
8917                 _base_reset_done_handler(ioc);
8918
8919  out:
8920         ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
8921
8922         spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8923         ioc->shost_recovery = 0;
8924         spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8925         ioc->ioc_reset_count++;
8926         mutex_unlock(&ioc->reset_in_progress_mutex);
8927         mpt3sas_base_resume_mq_polling(ioc);
8928
8929  out_unlocked:
8930         if ((r == 0) && is_trigger) {
8931                 if (is_fault)
8932                         mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
8933                 else
8934                         mpt3sas_trigger_master(ioc,
8935                             MASTER_TRIGGER_ADAPTER_RESET);
8936         }
8937         dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
8938         return r;
8939 }