drivers/scsi/smartpqi/smartpqi_init.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    driver for Microchip PQI-based storage controllers
4  *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
5  *    Copyright (c) 2016-2018 Microsemi Corporation
6  *    Copyright (c) 2016 PMC-Sierra, Inc.
7  *
8  *    Questions/Comments/Bugfixes to storagedev@microchip.com
9  *
10  */
11
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/sched.h>
18 #include <linux/rtc.h>
19 #include <linux/bcd.h>
20 #include <linux/reboot.h>
21 #include <linux/cciss_ioctl.h>
22 #include <linux/blk-mq-pci.h>
23 #include <scsi/scsi_host.h>
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_device.h>
26 #include <scsi/scsi_eh.h>
27 #include <scsi/scsi_transport_sas.h>
28 #include <asm/unaligned.h>
29 #include "smartpqi.h"
30 #include "smartpqi_sis.h"
31
32 #if !defined(BUILD_TIMESTAMP)
33 #define BUILD_TIMESTAMP
34 #endif
35
36 #define DRIVER_VERSION          "2.1.24-046"
37 #define DRIVER_MAJOR            2
38 #define DRIVER_MINOR            1
39 #define DRIVER_RELEASE          24
40 #define DRIVER_REVISION         46
41
42 #define DRIVER_NAME             "Microchip SmartPQI Driver (v" \
43                                 DRIVER_VERSION BUILD_TIMESTAMP ")"
44 #define DRIVER_NAME_SHORT       "smartpqi"
45
46 #define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))
47
48 #define PQI_POST_RESET_DELAY_SECS                       5
49 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS      10
50
51 #define PQI_NO_COMPLETION       ((void *)-1)
52
53 MODULE_AUTHOR("Microchip");
54 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
55         DRIVER_VERSION);
56 MODULE_VERSION(DRIVER_VERSION);
57 MODULE_LICENSE("GPL");
58
59 struct pqi_cmd_priv {
60         int this_residual;
61 };
62
63 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
64 {
65         return scsi_cmd_priv(cmd);
66 }
67
68 static void pqi_verify_structures(void);
69 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
70         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
71 static void pqi_ctrl_offline_worker(struct work_struct *work);
72 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
73 static void pqi_scan_start(struct Scsi_Host *shost);
74 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
75         struct pqi_queue_group *queue_group, enum pqi_io_path path,
76         struct pqi_io_request *io_request);
77 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
78         struct pqi_iu_header *request, unsigned int flags,
79         struct pqi_raid_error_info *error_info);
80 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
81         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
82         unsigned int cdb_length, struct pqi_queue_group *queue_group,
83         struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
84 static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
85         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
86         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
87         struct pqi_scsi_dev_raid_map_data *rmd);
88 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
89         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
90         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
91         struct pqi_scsi_dev_raid_map_data *rmd);
92 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
93 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
94 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
95 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
96 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
97 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
98 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
99         struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
100 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
101 static void pqi_tmf_worker(struct work_struct *work);
102
103 /* for flags argument to pqi_submit_raid_request_synchronous() */
104 #define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1
105
106 static struct scsi_transport_template *pqi_sas_transport_template;
107
108 static atomic_t pqi_controller_count = ATOMIC_INIT(0);
109
110 enum pqi_lockup_action {
111         NONE,
112         REBOOT,
113         PANIC
114 };
115
116 static enum pqi_lockup_action pqi_lockup_action = NONE;
117
118 static struct {
119         enum pqi_lockup_action  action;
120         char                    *name;
121 } pqi_lockup_actions[] = {
122         {
123                 .action = NONE,
124                 .name = "none",
125         },
126         {
127                 .action = REBOOT,
128                 .name = "reboot",
129         },
130         {
131                 .action = PANIC,
132                 .name = "panic",
133         },
134 };
135
136 static unsigned int pqi_supported_event_types[] = {
137         PQI_EVENT_TYPE_HOTPLUG,
138         PQI_EVENT_TYPE_HARDWARE,
139         PQI_EVENT_TYPE_PHYSICAL_DEVICE,
140         PQI_EVENT_TYPE_LOGICAL_DEVICE,
141         PQI_EVENT_TYPE_OFA,
142         PQI_EVENT_TYPE_AIO_STATE_CHANGE,
143         PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
144 };
145
146 static int pqi_disable_device_id_wildcards;
147 module_param_named(disable_device_id_wildcards,
148         pqi_disable_device_id_wildcards, int, 0644);
149 MODULE_PARM_DESC(disable_device_id_wildcards,
150         "Disable device ID wildcards.");
151
152 static int pqi_disable_heartbeat;
153 module_param_named(disable_heartbeat,
154         pqi_disable_heartbeat, int, 0644);
155 MODULE_PARM_DESC(disable_heartbeat,
156         "Disable heartbeat.");
157
158 static int pqi_disable_ctrl_shutdown;
159 module_param_named(disable_ctrl_shutdown,
160         pqi_disable_ctrl_shutdown, int, 0644);
161 MODULE_PARM_DESC(disable_ctrl_shutdown,
162         "Disable controller shutdown when controller locked up.");
163
164 static char *pqi_lockup_action_param;
165 module_param_named(lockup_action,
166         pqi_lockup_action_param, charp, 0644);
167 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
168         "\t\tSupported: none, reboot, panic\n"
169         "\t\tDefault: none");
170
171 static int pqi_expose_ld_first;
172 module_param_named(expose_ld_first,
173         pqi_expose_ld_first, int, 0644);
174 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");
175
176 static int pqi_hide_vsep;
177 module_param_named(hide_vsep,
178         pqi_hide_vsep, int, 0644);
179 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");
180
181 static int pqi_disable_managed_interrupts;
182 module_param_named(disable_managed_interrupts,
183         pqi_disable_managed_interrupts, int, 0644);
184 MODULE_PARM_DESC(disable_managed_interrupts,
185         "Disable the kernel automatically assigning SMP affinity to IRQs.");
186
187 static unsigned int pqi_ctrl_ready_timeout_secs;
188 module_param_named(ctrl_ready_timeout,
189         pqi_ctrl_ready_timeout_secs, uint, 0644);
190 MODULE_PARM_DESC(ctrl_ready_timeout,
191         "Timeout in seconds for driver to wait for controller ready.");
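
/*
 * Example (illustrative values only) of overriding the lockup action and
 * the controller-ready timeout at module load time:
 *
 *	modprobe smartpqi lockup_action=reboot ctrl_ready_timeout=180
 */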
192
193 static char *raid_levels[] = {
194         "RAID-0",
195         "RAID-4",
196         "RAID-1(1+0)",
197         "RAID-5",
198         "RAID-5+1",
199         "RAID-6",
200         "RAID-1(Triple)",
201 };
202
203 static char *pqi_raid_level_to_string(u8 raid_level)
204 {
205         if (raid_level < ARRAY_SIZE(raid_levels))
206                 return raid_levels[raid_level];
207
208         return "RAID UNKNOWN";
209 }
210
211 #define SA_RAID_0               0
212 #define SA_RAID_4               1
213 #define SA_RAID_1               2       /* also used for RAID 10 */
214 #define SA_RAID_5               3       /* also used for RAID 50 */
215 #define SA_RAID_51              4
216 #define SA_RAID_6               5       /* also used for RAID 60 */
217 #define SA_RAID_TRIPLE          6       /* also used for RAID 1+0 Triple */
218 #define SA_RAID_MAX             SA_RAID_TRIPLE
219 #define SA_RAID_UNKNOWN         0xff
220
221 static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
222 {
223         pqi_prep_for_scsi_done(scmd);
224         scsi_done(scmd);
225 }
226
227 static inline void pqi_disable_write_same(struct scsi_device *sdev)
228 {
229         sdev->no_write_same = 1;
230 }
231
232 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
233 {
234         return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
235 }
236
237 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
238 {
239         return !device->is_physical_device;
240 }
241
242 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
243 {
244         return scsi3addr[2] != 0;
245 }
246
247 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
248 {
249         return !ctrl_info->controller_online;
250 }
251
252 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
253 {
254         if (ctrl_info->controller_online)
255                 if (!sis_is_firmware_running(ctrl_info))
256                         pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
257 }
258
259 static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
260 {
261         return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
262 }
263
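/*
 * Flag bits kept in the SIS driver scratch register; they are read back
 * later by pqi_get_ctrl_mode() and pqi_is_fw_triage_supported().
 */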
264 #define PQI_DRIVER_SCRATCH_PQI_MODE                     0x1
265 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED          0x2
266
267 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
268 {
269         return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
270 }
271
272 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
273         enum pqi_ctrl_mode mode)
274 {
275         u32 driver_scratch;
276
277         driver_scratch = sis_read_driver_scratch(ctrl_info);
278
279         if (mode == PQI_MODE)
280                 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
281         else
282                 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;
283
284         sis_write_driver_scratch(ctrl_info, driver_scratch);
285 }
286
287 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
288 {
289         return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
290 }
291
292 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
293 {
294         u32 driver_scratch;
295
296         driver_scratch = sis_read_driver_scratch(ctrl_info);
297
298         if (is_supported)
299                 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
300         else
301                 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
302
303         sis_write_driver_scratch(ctrl_info, driver_scratch);
304 }
305
306 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
307 {
308         ctrl_info->scan_blocked = true;
309         mutex_lock(&ctrl_info->scan_mutex);
310 }
311
312 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
313 {
314         ctrl_info->scan_blocked = false;
315         mutex_unlock(&ctrl_info->scan_mutex);
316 }
317
318 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
319 {
320         return ctrl_info->scan_blocked;
321 }
322
323 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
324 {
325         mutex_lock(&ctrl_info->lun_reset_mutex);
326 }
327
328 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
329 {
330         mutex_unlock(&ctrl_info->lun_reset_mutex);
331 }
332
333 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
334 {
335         struct Scsi_Host *shost;
336         unsigned int num_loops;
337         int msecs_sleep;
338
339         shost = ctrl_info->scsi_host;
340
341         scsi_block_requests(shost);
342
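        /*
         * Poll until all outstanding host commands have drained: every
         * 20 ms at first, backing off to 500 ms after 10 iterations.
         */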
343         num_loops = 0;
344         msecs_sleep = 20;
345         while (scsi_host_busy(shost)) {
346                 num_loops++;
347                 if (num_loops == 10)
348                         msecs_sleep = 500;
349                 msleep(msecs_sleep);
350         }
351 }
352
353 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
354 {
355         scsi_unblock_requests(ctrl_info->scsi_host);
356 }
357
358 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
359 {
360         atomic_inc(&ctrl_info->num_busy_threads);
361 }
362
363 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
364 {
365         atomic_dec(&ctrl_info->num_busy_threads);
366 }
367
368 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
369 {
370         return ctrl_info->block_requests;
371 }
372
373 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
374 {
375         ctrl_info->block_requests = true;
376 }
377
378 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
379 {
380         ctrl_info->block_requests = false;
381         wake_up_all(&ctrl_info->block_requests_wait);
382 }
383
384 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
385 {
386         if (!pqi_ctrl_blocked(ctrl_info))
387                 return;
388
389         atomic_inc(&ctrl_info->num_blocked_threads);
390         wait_event(ctrl_info->block_requests_wait,
391                 !pqi_ctrl_blocked(ctrl_info));
392         atomic_dec(&ctrl_info->num_blocked_threads);
393 }
394
395 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS                10
396
397 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
398 {
399         unsigned long start_jiffies;
400         unsigned long warning_timeout;
401         bool displayed_warning;
402
403         displayed_warning = false;
404         start_jiffies = jiffies;
405         warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
406
407         while (atomic_read(&ctrl_info->num_busy_threads) >
408                 atomic_read(&ctrl_info->num_blocked_threads)) {
409                 if (time_after(jiffies, warning_timeout)) {
410                         dev_warn(&ctrl_info->pci_dev->dev,
411                                 "waiting %u seconds for driver activity to quiesce\n",
412                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
413                         displayed_warning = true;
414                         warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
415                 }
416                 usleep_range(1000, 2000);
417         }
418
419         if (displayed_warning)
420                 dev_warn(&ctrl_info->pci_dev->dev,
421                         "driver activity quiesced after waiting for %u seconds\n",
422                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
423 }
424
425 static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
426 {
427         return device->device_offline;
428 }
429
430 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
431 {
432         mutex_lock(&ctrl_info->ofa_mutex);
433 }
434
435 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
436 {
437         mutex_unlock(&ctrl_info->ofa_mutex);
438 }
439
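/*
 * Taking and immediately releasing ofa_mutex blocks until any
 * Online Firmware Activation currently holding the mutex has finished.
 */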
440 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
441 {
442         mutex_lock(&ctrl_info->ofa_mutex);
443         mutex_unlock(&ctrl_info->ofa_mutex);
444 }
445
446 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
447 {
448         return mutex_is_locked(&ctrl_info->ofa_mutex);
449 }
450
451 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
452 {
453         device->in_remove = true;
454 }
455
456 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
457 {
458         return device->in_remove;
459 }
460
461 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
462 {
463         device->in_reset[lun] = true;
464 }
465
466 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
467 {
468         device->in_reset[lun] = false;
469 }
470
471 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
472 {
473         return device->in_reset[lun];
474 }
475
476 static inline int pqi_event_type_to_event_index(unsigned int event_type)
477 {
478         int index;
479
480         for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
481                 if (event_type == pqi_supported_event_types[index])
482                         return index;
483
484         return -1;
485 }
486
487 static inline bool pqi_is_supported_event(unsigned int event_type)
488 {
489         return pqi_event_type_to_event_index(event_type) != -1;
490 }
491
492 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
493         unsigned long delay)
494 {
495         if (pqi_ctrl_offline(ctrl_info))
496                 return;
497
498         schedule_delayed_work(&ctrl_info->rescan_work, delay);
499 }
500
501 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
502 {
503         pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
504 }
505
506 #define PQI_RESCAN_WORK_DELAY   (10 * HZ)
507
508 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
509 {
510         pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
511 }
512
513 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
514 {
515         cancel_delayed_work_sync(&ctrl_info->rescan_work);
516 }
517
518 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
519 {
520         if (!ctrl_info->heartbeat_counter)
521                 return 0;
522
523         return readl(ctrl_info->heartbeat_counter);
524 }
525
526 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
527 {
528         return readb(ctrl_info->soft_reset_status);
529 }
530
531 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
532 {
533         u8 status;
534
535         status = pqi_read_soft_reset_status(ctrl_info);
536         status &= ~PQI_SOFT_RESET_ABORT;
537         writeb(status, ctrl_info->soft_reset_status);
538 }
539
540 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
541 {
542         bool io_high_prio;
543         int priority_class;
544
545         io_high_prio = false;
546
547         if (device->ncq_prio_enable) {
548                 priority_class =
549                         IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
550                 if (priority_class == IOPRIO_CLASS_RT) {
551                         /* Set NCQ priority for read/write commands. */
552                         switch (scmd->cmnd[0]) {
553                         case WRITE_16:
554                         case READ_16:
555                         case WRITE_12:
556                         case READ_12:
557                         case WRITE_10:
558                         case READ_10:
559                         case WRITE_6:
560                         case READ_6:
561                                 io_high_prio = true;
562                                 break;
563                         }
564                 }
565         }
566
567         return io_high_prio;
568 }
569
570 static int pqi_map_single(struct pci_dev *pci_dev,
571         struct pqi_sg_descriptor *sg_descriptor, void *buffer,
572         size_t buffer_length, enum dma_data_direction data_direction)
573 {
574         dma_addr_t bus_address;
575
576         if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
577                 return 0;
578
579         bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
580                 data_direction);
581         if (dma_mapping_error(&pci_dev->dev, bus_address))
582                 return -ENOMEM;
583
584         put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
585         put_unaligned_le32(buffer_length, &sg_descriptor->length);
586         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
587
588         return 0;
589 }
590
591 static void pqi_pci_unmap(struct pci_dev *pci_dev,
592         struct pqi_sg_descriptor *descriptors, int num_descriptors,
593         enum dma_data_direction data_direction)
594 {
595         int i;
596
597         if (data_direction == DMA_NONE)
598                 return;
599
600         for (i = 0; i < num_descriptors; i++)
601                 dma_unmap_single(&pci_dev->dev,
602                         (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
603                         get_unaligned_le32(&descriptors[i].length),
604                         data_direction);
605 }
606
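/*
 * Build a RAID path IU wrapping a CISS/BMIC CDB for the given command and
 * DMA-map the data buffer; callers unmap with pqi_pci_unmap() after the
 * request completes.
 */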
607 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
608         struct pqi_raid_path_request *request, u8 cmd,
609         u8 *scsi3addr, void *buffer, size_t buffer_length,
610         u16 vpd_page, enum dma_data_direction *dir)
611 {
612         u8 *cdb;
613         size_t cdb_length = buffer_length;
614
615         memset(request, 0, sizeof(*request));
616
617         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
618         put_unaligned_le16(offsetof(struct pqi_raid_path_request,
619                 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
620                 &request->header.iu_length);
621         put_unaligned_le32(buffer_length, &request->buffer_length);
622         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
623         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
624         request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
625
626         cdb = request->cdb;
627
628         switch (cmd) {
629         case INQUIRY:
630                 request->data_direction = SOP_READ_FLAG;
631                 cdb[0] = INQUIRY;
632                 if (vpd_page & VPD_PAGE) {
633                         cdb[1] = 0x1;
634                         cdb[2] = (u8)vpd_page;
635                 }
636                 cdb[4] = (u8)cdb_length;
637                 break;
638         case CISS_REPORT_LOG:
639         case CISS_REPORT_PHYS:
640                 request->data_direction = SOP_READ_FLAG;
641                 cdb[0] = cmd;
642                 if (cmd == CISS_REPORT_PHYS) {
643                         if (ctrl_info->rpl_extended_format_4_5_supported)
644                                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
645                         else
646                                 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
647                 } else {
648                         cdb[1] = ctrl_info->ciss_report_log_flags;
649                 }
650                 put_unaligned_be32(cdb_length, &cdb[6]);
651                 break;
652         case CISS_GET_RAID_MAP:
653                 request->data_direction = SOP_READ_FLAG;
654                 cdb[0] = CISS_READ;
655                 cdb[1] = CISS_GET_RAID_MAP;
656                 put_unaligned_be32(cdb_length, &cdb[6]);
657                 break;
658         case SA_FLUSH_CACHE:
659                 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
660                 request->data_direction = SOP_WRITE_FLAG;
661                 cdb[0] = BMIC_WRITE;
662                 cdb[6] = BMIC_FLUSH_CACHE;
663                 put_unaligned_be16(cdb_length, &cdb[7]);
664                 break;
665         case BMIC_SENSE_DIAG_OPTIONS:
666                 cdb_length = 0;
667                 fallthrough;
668         case BMIC_IDENTIFY_CONTROLLER:
669         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
670         case BMIC_SENSE_SUBSYSTEM_INFORMATION:
671         case BMIC_SENSE_FEATURE:
672                 request->data_direction = SOP_READ_FLAG;
673                 cdb[0] = BMIC_READ;
674                 cdb[6] = cmd;
675                 put_unaligned_be16(cdb_length, &cdb[7]);
676                 break;
677         case BMIC_SET_DIAG_OPTIONS:
678                 cdb_length = 0;
679                 fallthrough;
680         case BMIC_WRITE_HOST_WELLNESS:
681                 request->data_direction = SOP_WRITE_FLAG;
682                 cdb[0] = BMIC_WRITE;
683                 cdb[6] = cmd;
684                 put_unaligned_be16(cdb_length, &cdb[7]);
685                 break;
686         case BMIC_CSMI_PASSTHRU:
687                 request->data_direction = SOP_BIDIRECTIONAL;
688                 cdb[0] = BMIC_WRITE;
689                 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
690                 cdb[6] = cmd;
691                 put_unaligned_be16(cdb_length, &cdb[7]);
692                 break;
693         default:
694                 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
695                 break;
696         }
697
698         switch (request->data_direction) {
699         case SOP_READ_FLAG:
700                 *dir = DMA_FROM_DEVICE;
701                 break;
702         case SOP_WRITE_FLAG:
703                 *dir = DMA_TO_DEVICE;
704                 break;
705         case SOP_NO_DIRECTION_FLAG:
706                 *dir = DMA_NONE;
707                 break;
708         default:
709                 *dir = DMA_BIDIRECTIONAL;
710                 break;
711         }
712
713         return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
714                 buffer, buffer_length, *dir);
715 }
716
717 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
718 {
719         io_request->scmd = NULL;
720         io_request->status = 0;
721         io_request->error_info = NULL;
722         io_request->raid_bypass = false;
723 }
724
725 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
726 {
727         struct pqi_io_request *io_request;
728         u16 i;
729
730         if (scmd) { /* SML I/O request */
731                 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
732
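                /*
                 * The block layer tag indexes directly into the I/O request
                 * pool; a refcount greater than 1 means the slot is still
                 * owned by another command.
                 */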
733                 i = blk_mq_unique_tag_to_tag(blk_tag);
734                 io_request = &ctrl_info->io_request_pool[i];
735                 if (atomic_inc_return(&io_request->refcount) > 1) {
736                         atomic_dec(&io_request->refcount);
737                         return NULL;
738                 }
739         } else { /* IOCTL or driver internal request */
740                 /*
741                  * benignly racy - may have to wait for an open slot.
742                  * command slot range is scsi_ml_can_queue -
743                  *         [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
744                  */
745                 i = 0;
746                 while (1) {
747                         io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
748                         if (atomic_inc_return(&io_request->refcount) == 1)
749                                 break;
750                         atomic_dec(&io_request->refcount);
751                         i = (i + 1) % PQI_RESERVED_IO_SLOTS;
752                 }
753         }
754
755         if (io_request)
756                 pqi_reinit_io_request(io_request);
757
758         return io_request;
759 }
760
761 static void pqi_free_io_request(struct pqi_io_request *io_request)
762 {
763         atomic_dec(&io_request->refcount);
764 }
765
766 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
767         u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
768         struct pqi_raid_error_info *error_info)
769 {
770         int rc;
771         struct pqi_raid_path_request request;
772         enum dma_data_direction dir;
773
774         rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
775                 buffer, buffer_length, vpd_page, &dir);
776         if (rc)
777                 return rc;
778
779         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);
780
781         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
782
783         return rc;
784 }
785
786 /* helper functions for pqi_send_scsi_raid_request */
787
788 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
789         u8 cmd, void *buffer, size_t buffer_length)
790 {
791         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
792                 buffer, buffer_length, 0, NULL);
793 }
794
795 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
796         u8 cmd, void *buffer, size_t buffer_length,
797         struct pqi_raid_error_info *error_info)
798 {
799         return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
800                 buffer, buffer_length, 0, error_info);
801 }
802
803 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
804         struct bmic_identify_controller *buffer)
805 {
806         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
807                 buffer, sizeof(*buffer));
808 }
809
810 static inline int pqi_sense_subsystem_info(struct  pqi_ctrl_info *ctrl_info,
811         struct bmic_sense_subsystem_info *sense_info)
812 {
813         return pqi_send_ctrl_raid_request(ctrl_info,
814                 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
815                 sizeof(*sense_info));
816 }
817
818 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
819         u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
820 {
821         return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
822                 buffer, buffer_length, vpd_page, NULL);
823 }
824
825 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
826         struct pqi_scsi_dev *device,
827         struct bmic_identify_physical_device *buffer, size_t buffer_length)
828 {
829         int rc;
830         enum dma_data_direction dir;
831         u16 bmic_device_index;
832         struct pqi_raid_path_request request;
833
834         rc = pqi_build_raid_path_request(ctrl_info, &request,
835                 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
836                 buffer_length, 0, &dir);
837         if (rc)
838                 return rc;
839
840         bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
841         request.cdb[2] = (u8)bmic_device_index;
842         request.cdb[9] = (u8)(bmic_device_index >> 8);
843
844         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
845
846         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
847
848         return rc;
849 }
850
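/* BMIC sense-feature limits are reported in KiB; 0 means no limit. */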
851 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
852 {
853         u32 bytes;
854
855         bytes = get_unaligned_le16(limit);
856         if (bytes == 0)
857                 bytes = ~0;
858         else
859                 bytes *= 1024;
860
861         return bytes;
862 }
863
864 #pragma pack(1)
865
866 struct bmic_sense_feature_buffer {
867         struct bmic_sense_feature_buffer_header header;
868         struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
869 };
870
871 #pragma pack()
872
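/*
 * Smallest buffer/page lengths that still contain every AIO subpage field
 * used below, ending at max_write_raid_1_10_3drive.
 */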
873 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH       \
874         offsetofend(struct bmic_sense_feature_buffer, \
875                 aio_subpage.max_write_raid_1_10_3drive)
876
877 #define MINIMUM_AIO_SUBPAGE_LENGTH      \
878         (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
879                 max_write_raid_1_10_3drive) - \
880                 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))
881
882 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
883 {
884         int rc;
885         enum dma_data_direction dir;
886         struct pqi_raid_path_request request;
887         struct bmic_sense_feature_buffer *buffer;
888
889         buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
890         if (!buffer)
891                 return -ENOMEM;
892
893         rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
894                 buffer, sizeof(*buffer), 0, &dir);
895         if (rc)
896                 goto error;
897
898         request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
899         request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;
900
901         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
902
903         pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
904
905         if (rc)
906                 goto error;
907
908         if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
909                 buffer->header.subpage_code !=
910                         BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
911                 get_unaligned_le16(&buffer->header.buffer_length) <
912                         MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
913                 buffer->aio_subpage.header.page_code !=
914                         BMIC_SENSE_FEATURE_IO_PAGE ||
915                 buffer->aio_subpage.header.subpage_code !=
916                         BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
917                 get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
918                         MINIMUM_AIO_SUBPAGE_LENGTH) {
919                 goto error;
920         }
921
922         ctrl_info->max_transfer_encrypted_sas_sata =
923                 pqi_aio_limit_to_bytes(
924                         &buffer->aio_subpage.max_transfer_encrypted_sas_sata);
925
926         ctrl_info->max_transfer_encrypted_nvme =
927                 pqi_aio_limit_to_bytes(
928                         &buffer->aio_subpage.max_transfer_encrypted_nvme);
929
930         ctrl_info->max_write_raid_5_6 =
931                 pqi_aio_limit_to_bytes(
932                         &buffer->aio_subpage.max_write_raid_5_6);
933
934         ctrl_info->max_write_raid_1_10_2drive =
935                 pqi_aio_limit_to_bytes(
936                         &buffer->aio_subpage.max_write_raid_1_10_2drive);
937
938         ctrl_info->max_write_raid_1_10_3drive =
939                 pqi_aio_limit_to_bytes(
940                         &buffer->aio_subpage.max_write_raid_1_10_3drive);
941
942 error:
943         kfree(buffer);
944
945         return rc;
946 }
947
948 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
949         enum bmic_flush_cache_shutdown_event shutdown_event)
950 {
951         int rc;
952         struct bmic_flush_cache *flush_cache;
953
954         flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
955         if (!flush_cache)
956                 return -ENOMEM;
957
958         flush_cache->shutdown_event = shutdown_event;
959
960         rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
961                 sizeof(*flush_cache));
962
963         kfree(flush_cache);
964
965         return rc;
966 }
967
968 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
969         struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
970         struct pqi_raid_error_info *error_info)
971 {
972         return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
973                 buffer, buffer_length, error_info);
974 }
975
976 #define PQI_FETCH_PTRAID_DATA           (1 << 31)
977
978 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
979 {
980         int rc;
981         struct bmic_diag_options *diag;
982
983         diag = kzalloc(sizeof(*diag), GFP_KERNEL);
984         if (!diag)
985                 return -ENOMEM;
986
987         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
988                 diag, sizeof(*diag));
989         if (rc)
990                 goto out;
991
992         diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
993
994         rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
995                 sizeof(*diag));
996
997 out:
998         kfree(diag);
999
1000         return rc;
1001 }
1002
1003 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
1004         void *buffer, size_t buffer_length)
1005 {
1006         return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
1007                 buffer, buffer_length);
1008 }
1009
1010 #pragma pack(1)
1011
1012 struct bmic_host_wellness_driver_version {
1013         u8      start_tag[4];
1014         u8      driver_version_tag[2];
1015         __le16  driver_version_length;
1016         char    driver_version[32];
1017         u8      dont_write_tag[2];
1018         u8      end_tag[2];
1019 };
1020
1021 #pragma pack()
1022
1023 static int pqi_write_driver_version_to_host_wellness(
1024         struct pqi_ctrl_info *ctrl_info)
1025 {
1026         int rc;
1027         struct bmic_host_wellness_driver_version *buffer;
1028         size_t buffer_length;
1029
1030         buffer_length = sizeof(*buffer);
1031
1032         buffer = kmalloc(buffer_length, GFP_KERNEL);
1033         if (!buffer)
1034                 return -ENOMEM;
1035
1036         buffer->start_tag[0] = '<';
1037         buffer->start_tag[1] = 'H';
1038         buffer->start_tag[2] = 'W';
1039         buffer->start_tag[3] = '>';
1040         buffer->driver_version_tag[0] = 'D';
1041         buffer->driver_version_tag[1] = 'V';
1042         put_unaligned_le16(sizeof(buffer->driver_version),
1043                 &buffer->driver_version_length);
1044         strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
1045                 sizeof(buffer->driver_version) - 1);
1046         buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
1047         buffer->dont_write_tag[0] = 'D';
1048         buffer->dont_write_tag[1] = 'W';
1049         buffer->end_tag[0] = 'Z';
1050         buffer->end_tag[1] = 'Z';
1051
1052         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1053
1054         kfree(buffer);
1055
1056         return rc;
1057 }
1058
1059 #pragma pack(1)
1060
1061 struct bmic_host_wellness_time {
1062         u8      start_tag[4];
1063         u8      time_tag[2];
1064         __le16  time_length;
1065         u8      time[8];
1066         u8      dont_write_tag[2];
1067         u8      end_tag[2];
1068 };
1069
1070 #pragma pack()
1071
1072 static int pqi_write_current_time_to_host_wellness(
1073         struct pqi_ctrl_info *ctrl_info)
1074 {
1075         int rc;
1076         struct bmic_host_wellness_time *buffer;
1077         size_t buffer_length;
1078         time64_t local_time;
1079         unsigned int year;
1080         struct tm tm;
1081
1082         buffer_length = sizeof(*buffer);
1083
1084         buffer = kmalloc(buffer_length, GFP_KERNEL);
1085         if (!buffer)
1086                 return -ENOMEM;
1087
1088         buffer->start_tag[0] = '<';
1089         buffer->start_tag[1] = 'H';
1090         buffer->start_tag[2] = 'W';
1091         buffer->start_tag[3] = '>';
1092         buffer->time_tag[0] = 'T';
1093         buffer->time_tag[1] = 'D';
1094         put_unaligned_le16(sizeof(buffer->time),
1095                 &buffer->time_length);
1096
1097         local_time = ktime_get_real_seconds();
1098         time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
1099         year = tm.tm_year + 1900;
1100
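        /*
         * BCD-encoded time: hour, minute, second, a reserved byte, month,
         * day, century, then two-digit year.
         */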
1101         buffer->time[0] = bin2bcd(tm.tm_hour);
1102         buffer->time[1] = bin2bcd(tm.tm_min);
1103         buffer->time[2] = bin2bcd(tm.tm_sec);
1104         buffer->time[3] = 0;
1105         buffer->time[4] = bin2bcd(tm.tm_mon + 1);
1106         buffer->time[5] = bin2bcd(tm.tm_mday);
1107         buffer->time[6] = bin2bcd(year / 100);
1108         buffer->time[7] = bin2bcd(year % 100);
1109
1110         buffer->dont_write_tag[0] = 'D';
1111         buffer->dont_write_tag[1] = 'W';
1112         buffer->end_tag[0] = 'Z';
1113         buffer->end_tag[1] = 'Z';
1114
1115         rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
1116
1117         kfree(buffer);
1118
1119         return rc;
1120 }
1121
1122 #define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)
1123
1124 static void pqi_update_time_worker(struct work_struct *work)
1125 {
1126         int rc;
1127         struct pqi_ctrl_info *ctrl_info;
1128
1129         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1130                 update_time_work);
1131
1132         rc = pqi_write_current_time_to_host_wellness(ctrl_info);
1133         if (rc)
1134                 dev_warn(&ctrl_info->pci_dev->dev,
1135                         "error updating time on controller\n");
1136
1137         schedule_delayed_work(&ctrl_info->update_time_work,
1138                 PQI_UPDATE_TIME_WORK_INTERVAL);
1139 }
1140
1141 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1142 {
1143         schedule_delayed_work(&ctrl_info->update_time_work, 0);
1144 }
1145
1146 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
1147 {
1148         cancel_delayed_work_sync(&ctrl_info->update_time_work);
1149 }
1150
1151 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
1152         size_t buffer_length)
1153 {
1154         return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
1155 }
1156
1157 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
1158 {
1159         int rc;
1160         size_t lun_list_length;
1161         size_t lun_data_length;
1162         size_t new_lun_list_length;
1163         void *lun_data = NULL;
1164         struct report_lun_header *report_lun_header;
1165
1166         report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
1167         if (!report_lun_header) {
1168                 rc = -ENOMEM;
1169                 goto out;
1170         }
1171
1172         rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
1173         if (rc)
1174                 goto out;
1175
1176         lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
1177
1178 again:
1179         lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
1180
1181         lun_data = kmalloc(lun_data_length, GFP_KERNEL);
1182         if (!lun_data) {
1183                 rc = -ENOMEM;
1184                 goto out;
1185         }
1186
1187         if (lun_list_length == 0) {
1188                 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
1189                 goto out;
1190         }
1191
1192         rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
1193         if (rc)
1194                 goto out;
1195
1196         new_lun_list_length =
1197                 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);
1198
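        /*
         * The controller may report a longer list than the initial sizing
         * read saw; if so, reallocate and issue the request again.
         */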
1199         if (new_lun_list_length > lun_list_length) {
1200                 lun_list_length = new_lun_list_length;
1201                 kfree(lun_data);
1202                 goto again;
1203         }
1204
1205 out:
1206         kfree(report_lun_header);
1207
1208         if (rc) {
1209                 kfree(lun_data);
1210                 lun_data = NULL;
1211         }
1212
1213         *buffer = lun_data;
1214
1215         return rc;
1216 }
1217
1218 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1219 {
1220         int rc;
1221         unsigned int i;
1222         u8 rpl_response_format;
1223         u32 num_physicals;
1224         void *rpl_list;
1225         struct report_lun_header *rpl_header;
1226         struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
1227         struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;
1228
1229         rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
1230         if (rc)
1231                 return rc;
1232
1233         if (ctrl_info->rpl_extended_format_4_5_supported) {
1234                 rpl_header = rpl_list;
1235                 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
1236                 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
1237                         *buffer = rpl_list;
1238                         return 0;
1239                 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
1240                         dev_err(&ctrl_info->pci_dev->dev,
1241                                 "RPL returned unsupported data format %u\n",
1242                                 rpl_response_format);
1243                         return -EINVAL;
1244                 } else {
1245                         dev_warn(&ctrl_info->pci_dev->dev,
1246                                 "RPL returned extended format 2 instead of 4\n");
1247                 }
1248         }
1249
1250         rpl_8byte_wwid_list = rpl_list;
1251         num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);
1252
1253         rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
1254                                                    num_physicals), GFP_KERNEL);
1255         if (!rpl_16byte_wwid_list)
1256                 return -ENOMEM;
1257
1258         put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
1259                 &rpl_16byte_wwid_list->header.list_length);
1260         rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;
1261
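        /*
         * Repackage each 8-byte WWID entry into the 16-byte WWID format used
         * by the rest of the driver, zeroing the upper eight WWID bytes.
         */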
1262         for (i = 0; i < num_physicals; i++) {
1263                 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
1264                 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
1265                 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
1266                 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
1267                 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
1268                 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
1269                 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
1270                 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
1271         }
1272
1273         kfree(rpl_8byte_wwid_list);
1274         *buffer = rpl_16byte_wwid_list;
1275
1276         return 0;
1277 }
1278
1279 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
1280 {
1281         return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
1282 }
1283
1284 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
1285         struct report_phys_lun_16byte_wwid_list **physdev_list,
1286         struct report_log_lun_list **logdev_list)
1287 {
1288         int rc;
1289         size_t logdev_list_length;
1290         size_t logdev_data_length;
1291         struct report_log_lun_list *internal_logdev_list;
1292         struct report_log_lun_list *logdev_data;
1293         struct report_lun_header report_lun_header;
1294
1295         rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
1296         if (rc)
1297                 dev_err(&ctrl_info->pci_dev->dev,
1298                         "report physical LUNs failed\n");
1299
1300         rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
1301         if (rc)
1302                 dev_err(&ctrl_info->pci_dev->dev,
1303                         "report logical LUNs failed\n");
1304
1305         /*
1306          * Tack the controller itself onto the end of the logical device list
1307          * by adding a list entry that is all zeros.
1308          */
1309
1310         logdev_data = *logdev_list;
1311
1312         if (logdev_data) {
1313                 logdev_list_length =
1314                         get_unaligned_be32(&logdev_data->header.list_length);
1315         } else {
1316                 memset(&report_lun_header, 0, sizeof(report_lun_header));
1317                 logdev_data =
1318                         (struct report_log_lun_list *)&report_lun_header;
1319                 logdev_list_length = 0;
1320         }
1321
1322         logdev_data_length = sizeof(struct report_lun_header) +
1323                 logdev_list_length;
1324
1325         internal_logdev_list = kmalloc(logdev_data_length +
1326                 sizeof(struct report_log_lun), GFP_KERNEL);
1327         if (!internal_logdev_list) {
1328                 kfree(*logdev_list);
1329                 *logdev_list = NULL;
1330                 return -ENOMEM;
1331         }
1332
1333         memcpy(internal_logdev_list, logdev_data, logdev_data_length);
1334         memset((u8 *)internal_logdev_list + logdev_data_length, 0,
1335                 sizeof(struct report_log_lun));
1336         put_unaligned_be32(logdev_list_length +
1337                 sizeof(struct report_log_lun),
1338                 &internal_logdev_list->header.list_length);
1339
1340         kfree(*logdev_list);
1341         *logdev_list = internal_logdev_list;
1342
1343         return 0;
1344 }
1345
1346 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
1347         int bus, int target, int lun)
1348 {
1349         device->bus = bus;
1350         device->target = target;
1351         device->lun = lun;
1352 }
1353
1354 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
1355 {
1356         u8 *scsi3addr;
1357         u32 lunid;
1358         int bus;
1359         int target;
1360         int lun;
1361
1362         scsi3addr = device->scsi3addr;
1363         lunid = get_unaligned_le32(scsi3addr);
1364
1365         if (pqi_is_hba_lunid(scsi3addr)) {
1366                 /* The specified device is the controller. */
1367                 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
1368                 device->target_lun_valid = true;
1369                 return;
1370         }
1371
1372         if (pqi_is_logical_device(device)) {
1373                 if (device->is_external_raid_device) {
1374                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
1375                         target = (lunid >> 16) & 0x3fff;
1376                         lun = lunid & 0xff;
1377                 } else {
1378                         bus = PQI_RAID_VOLUME_BUS;
1379                         target = 0;
1380                         lun = lunid & 0x3fff;
1381                 }
1382                 pqi_set_bus_target_lun(device, bus, target, lun);
1383                 device->target_lun_valid = true;
1384                 return;
1385         }
1386
1387         /*
1388          * Defer target and LUN assignment for non-controller physical devices
1389          * because the SAS transport layer will make these assignments later.
1390          */
1391         pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
1392 }
1393
1394 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
1395         struct pqi_scsi_dev *device)
1396 {
1397         int rc;
1398         u8 raid_level;
1399         u8 *buffer;
1400
1401         raid_level = SA_RAID_UNKNOWN;
1402
1403         buffer = kmalloc(64, GFP_KERNEL);
1404         if (buffer) {
1405                 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1406                         VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
1407                 if (rc == 0) {
1408                         raid_level = buffer[8];
1409                         if (raid_level > SA_RAID_MAX)
1410                                 raid_level = SA_RAID_UNKNOWN;
1411                 }
1412                 kfree(buffer);
1413         }
1414
1415         device->raid_level = raid_level;
1416 }
1417
1418 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
1419         struct pqi_scsi_dev *device, struct raid_map *raid_map)
1420 {
1421         char *err_msg;
1422         u32 raid_map_size;
1423         u32 r5or6_blocks_per_row;
1424
1425         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1426
1427         if (raid_map_size < offsetof(struct raid_map, disk_data)) {
1428                 err_msg = "RAID map too small";
1429                 goto bad_raid_map;
1430         }
1431
1432         if (device->raid_level == SA_RAID_1) {
1433                 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
1434                         err_msg = "invalid RAID-1 map";
1435                         goto bad_raid_map;
1436                 }
1437         } else if (device->raid_level == SA_RAID_TRIPLE) {
1438                 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
1439                         err_msg = "invalid RAID-1(Triple) map";
1440                         goto bad_raid_map;
1441                 }
1442         } else if ((device->raid_level == SA_RAID_5 ||
1443                 device->raid_level == SA_RAID_6) &&
1444                 get_unaligned_le16(&raid_map->layout_map_count) > 1) {
1445                 /* RAID 50/60 */
1446                 r5or6_blocks_per_row =
1447                         get_unaligned_le16(&raid_map->strip_size) *
1448                         get_unaligned_le16(&raid_map->data_disks_per_row);
1449                 if (r5or6_blocks_per_row == 0) {
1450                         err_msg = "invalid RAID-5 or RAID-6 map";
1451                         goto bad_raid_map;
1452                 }
1453         }
1454
1455         return 0;
1456
1457 bad_raid_map:
1458         dev_warn(&ctrl_info->pci_dev->dev,
1459                 "logical device %08x%08x %s\n",
1460                 *((u32 *)&device->scsi3addr),
1461                 *((u32 *)&device->scsi3addr[4]), err_msg);
1462
1463         return -EINVAL;
1464 }
1465
1466 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
1467         struct pqi_scsi_dev *device)
1468 {
1469         int rc;
1470         u32 raid_map_size;
1471         struct raid_map *raid_map;
1472
1473         raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
1474         if (!raid_map)
1475                 return -ENOMEM;
1476
1477         rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1478                 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
1479         if (rc)
1480                 goto error;
1481
1482         raid_map_size = get_unaligned_le32(&raid_map->structure_size);
1483
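        /*
         * The firmware reported a map larger than the default allocation:
         * fetch the full structure and verify the reported size is stable.
         */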
1484         if (raid_map_size > sizeof(*raid_map)) {
1485
1486                 kfree(raid_map);
1487
1488                 raid_map = kmalloc(raid_map_size, GFP_KERNEL);
1489                 if (!raid_map)
1490                         return -ENOMEM;
1491
1492                 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
1493                         device->scsi3addr, raid_map, raid_map_size, 0, NULL);
1494                 if (rc)
1495                         goto error;
1496
1497                 if (get_unaligned_le32(&raid_map->structure_size)
1498                         != raid_map_size) {
1499                         dev_warn(&ctrl_info->pci_dev->dev,
1500                                 "requested %u bytes, received %u bytes\n",
1501                                 raid_map_size,
1502                                 get_unaligned_le32(&raid_map->structure_size));
1503                         rc = -EINVAL;
1504                         goto error;
1505                 }
1506         }
1507
1508         rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
1509         if (rc)
1510                 goto error;
1511
1512         device->raid_map = raid_map;
1513
1514         return 0;
1515
1516 error:
1517         kfree(raid_map);
1518
1519         return rc;
1520 }
1521
1522 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
1523         struct pqi_scsi_dev *device)
1524 {
1525         if (!ctrl_info->lv_drive_type_mix_valid) {
1526                 device->max_transfer_encrypted = ~0;
1527                 return;
1528         }
1529
1530         switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
1531         case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
1532         case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
1533         case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
1534         case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
1535         case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
1536         case LV_DRIVE_TYPE_MIX_SAS_ONLY:
1537         case LV_DRIVE_TYPE_MIX_SATA_ONLY:
1538                 device->max_transfer_encrypted =
1539                         ctrl_info->max_transfer_encrypted_sas_sata;
1540                 break;
1541         case LV_DRIVE_TYPE_MIX_NVME_ONLY:
1542                 device->max_transfer_encrypted =
1543                         ctrl_info->max_transfer_encrypted_nvme;
1544                 break;
1545         case LV_DRIVE_TYPE_MIX_UNKNOWN:
1546         case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
1547         default:
1548                 device->max_transfer_encrypted =
1549                         min(ctrl_info->max_transfer_encrypted_sas_sata,
1550                                 ctrl_info->max_transfer_encrypted_nvme);
1551                 break;
1552         }
1553 }
1554
1555 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
1556         struct pqi_scsi_dev *device)
1557 {
1558         int rc;
1559         u8 *buffer;
1560         u8 bypass_status;
1561
1562         buffer = kmalloc(64, GFP_KERNEL);
1563         if (!buffer)
1564                 return;
1565
1566         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1567                 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
1568         if (rc)
1569                 goto out;
1570
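/* Byte 4 of the bypass status VPD page: bit 0 = bypass configured, bit 1 = bypass enabled. */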
1571 #define RAID_BYPASS_STATUS              4
1572 #define RAID_BYPASS_CONFIGURED          0x1
1573 #define RAID_BYPASS_ENABLED             0x2
1574
1575         bypass_status = buffer[RAID_BYPASS_STATUS];
1576         device->raid_bypass_configured =
1577                 (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
1578         if (device->raid_bypass_configured &&
1579                 (bypass_status & RAID_BYPASS_ENABLED) &&
1580                 pqi_get_raid_map(ctrl_info, device) == 0) {
1581                 device->raid_bypass_enabled = true;
1582                 if (get_unaligned_le16(&device->raid_map->flags) &
1583                         RAID_MAP_ENCRYPTION_ENABLED)
1584                         pqi_set_max_transfer_encrypted(ctrl_info, device);
1585         }
1586
1587 out:
1588         kfree(buffer);
1589 }
1590
1591 /*
1592  * Use vendor-specific VPD to determine online/offline status of a volume.
1593  */
1594
1595 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
1596         struct pqi_scsi_dev *device)
1597 {
1598         int rc;
1599         size_t page_length;
1600         u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
1601         bool volume_offline = true;
1602         u32 volume_flags;
1603         struct ciss_vpd_logical_volume_status *vpd;
1604
1605         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1606         if (!vpd)
1607                 goto no_buffer;
1608
1609         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1610                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1611         if (rc)
1612                 goto out;
1613
1614         if (vpd->page_code != CISS_VPD_LV_STATUS)
1615                 goto out;
1616
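        /* Reject responses whose reported page length does not cover the volume_status and flags fields. */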
1617         page_length = offsetof(struct ciss_vpd_logical_volume_status,
1618                 volume_status) + vpd->page_length;
1619         if (page_length < sizeof(*vpd))
1620                 goto out;
1621
1622         volume_status = vpd->volume_status;
1623         volume_flags = get_unaligned_be32(&vpd->flags);
1624         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1625
1626 out:
1627         kfree(vpd);
1628 no_buffer:
1629         device->volume_status = volume_status;
1630         device->volume_offline = volume_offline;
1631 }
1632
1633 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED   0x01
1634 #define PQI_DEVICE_PHY_MAP_SUPPORTED    0x10
1635 #define PQI_DEVICE_ERASE_IN_PROGRESS    0x10
1636
1637 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1638         struct pqi_scsi_dev *device,
1639         struct bmic_identify_physical_device *id_phys)
1640 {
1641         int rc;
1642
1643         memset(id_phys, 0, sizeof(*id_phys));
1644
1645         rc = pqi_identify_physical_device(ctrl_info, device,
1646                 id_phys, sizeof(*id_phys));
1647         if (rc) {
1648                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1649                 return rc;
1650         }
1651
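        /*
         * The identify data packs the 8-byte vendor ID followed by the
         * 16-byte model ID into the model[] field; split them apart here.
         */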
1652         scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1653         scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1654
1655         memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1656         memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1657
1658         device->box_index = id_phys->box_index;
1659         device->phys_box_on_bus = id_phys->phys_box_on_bus;
1660         device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1661         device->queue_depth =
1662                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1663         device->active_path_index = id_phys->active_path_number;
1664         device->path_map = id_phys->redundant_path_present_map;
1665         memcpy(&device->box,
1666                 &id_phys->alternate_paths_phys_box_on_port,
1667                 sizeof(device->box));
1668         memcpy(&device->phys_connector,
1669                 &id_phys->alternate_paths_phys_connector,
1670                 sizeof(device->phys_connector));
1671         device->bay = id_phys->phys_bay_in_box;
1672         device->lun_count = id_phys->multi_lun_device_lun_count;
1673         if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1674                 id_phys->phy_count)
1675                 device->phy_id =
1676                         id_phys->phy_to_phy_map[device->active_path_index];
1677         else
1678                 device->phy_id = 0xFF;
1679
1680         device->ncq_prio_support =
1681                 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1682                 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1683
1684         device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1685
1686         return 0;
1687 }
1688
1689 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1690         struct pqi_scsi_dev *device)
1691 {
1692         int rc;
1693         u8 *buffer;
1694
1695         buffer = kmalloc(64, GFP_KERNEL);
1696         if (!buffer)
1697                 return -ENOMEM;
1698
1699         /* Send an inquiry to the device to see what it is. */
1700         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1701         if (rc)
1702                 goto out;
1703
1704         scsi_sanitize_inquiry_string(&buffer[8], 8);
1705         scsi_sanitize_inquiry_string(&buffer[16], 16);
1706
1707         device->devtype = buffer[0] & 0x1f;
1708         memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1709         memcpy(device->model, &buffer[16], sizeof(device->model));
1710
1711         if (device->devtype == TYPE_DISK) {
1712                 if (device->is_external_raid_device) {
1713                         device->raid_level = SA_RAID_UNKNOWN;
1714                         device->volume_status = CISS_LV_OK;
1715                         device->volume_offline = false;
1716                 } else {
1717                         pqi_get_raid_level(ctrl_info, device);
1718                         pqi_get_raid_bypass_status(ctrl_info, device);
1719                         pqi_get_volume_status(ctrl_info, device);
1720                 }
1721         }
1722
1723 out:
1724         kfree(buffer);
1725
1726         return rc;
1727 }
1728
1729 /*
1730  * Prevent adding a drive to the OS for some corner cases, such as a drive
1731  * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1732  * the drive until the sanitize completes, which can take hours,
1733  * resulting in long bootup delays. Commands such as TUR and READ CAPACITY
1734  * are allowed, but READ/WRITE return a check condition, so the OS
1735  * cannot check or read the partition table.
1736  * Note: devices that have completed sanitize must be re-enabled
1737  *       using the management utility.
1738  */
1739 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1740 {
1741         return device->erase_in_progress;
1742 }
1743
1744 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1745         struct pqi_scsi_dev *device,
1746         struct bmic_identify_physical_device *id_phys)
1747 {
1748         int rc;
1749
1750         if (device->is_expander_smp_device)
1751                 return 0;
1752
1753         if (pqi_is_logical_device(device))
1754                 rc = pqi_get_logical_device_info(ctrl_info, device);
1755         else
1756                 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1757
1758         return rc;
1759 }
1760
1761 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1762         struct pqi_scsi_dev *device,
1763         struct bmic_identify_physical_device *id_phys)
1764 {
1765         int rc;
1766
1767         rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1768
1769         if (rc == 0 && device->lun_count == 0)
1770                 device->lun_count = 1;
1771
1772         return rc;
1773 }
1774
1775 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1776         struct pqi_scsi_dev *device)
1777 {
1778         char *status;
1779         static const char unknown_state_str[] =
1780                 "Volume is in an unknown state (%u)";
1781         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1782
1783         switch (device->volume_status) {
1784         case CISS_LV_OK:
1785                 status = "Volume online";
1786                 break;
1787         case CISS_LV_FAILED:
1788                 status = "Volume failed";
1789                 break;
1790         case CISS_LV_NOT_CONFIGURED:
1791                 status = "Volume not configured";
1792                 break;
1793         case CISS_LV_DEGRADED:
1794                 status = "Volume degraded";
1795                 break;
1796         case CISS_LV_READY_FOR_RECOVERY:
1797                 status = "Volume ready for recovery operation";
1798                 break;
1799         case CISS_LV_UNDERGOING_RECOVERY:
1800                 status = "Volume undergoing recovery";
1801                 break;
1802         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1803                 status = "Wrong physical drive was replaced";
1804                 break;
1805         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1806                 status = "A physical drive not properly connected";
1807                 break;
1808         case CISS_LV_HARDWARE_OVERHEATING:
1809                 status = "Hardware is overheating";
1810                 break;
1811         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1812                 status = "Hardware has overheated";
1813                 break;
1814         case CISS_LV_UNDERGOING_EXPANSION:
1815                 status = "Volume undergoing expansion";
1816                 break;
1817         case CISS_LV_NOT_AVAILABLE:
1818                 status = "Volume waiting for transforming volume";
1819                 break;
1820         case CISS_LV_QUEUED_FOR_EXPANSION:
1821                 status = "Volume queued for expansion";
1822                 break;
1823         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1824                 status = "Volume disabled due to SCSI ID conflict";
1825                 break;
1826         case CISS_LV_EJECTED:
1827                 status = "Volume has been ejected";
1828                 break;
1829         case CISS_LV_UNDERGOING_ERASE:
1830                 status = "Volume undergoing background erase";
1831                 break;
1832         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1833                 status = "Volume ready for predictive spare rebuild";
1834                 break;
1835         case CISS_LV_UNDERGOING_RPI:
1836                 status = "Volume undergoing rapid parity initialization";
1837                 break;
1838         case CISS_LV_PENDING_RPI:
1839                 status = "Volume queued for rapid parity initialization";
1840                 break;
1841         case CISS_LV_ENCRYPTED_NO_KEY:
1842                 status = "Encrypted volume inaccessible - key not present";
1843                 break;
1844         case CISS_LV_UNDERGOING_ENCRYPTION:
1845                 status = "Volume undergoing encryption process";
1846                 break;
1847         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1848                 status = "Volume undergoing encryption re-keying process";
1849                 break;
1850         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1851                 status = "Volume encrypted but encryption is disabled";
1852                 break;
1853         case CISS_LV_PENDING_ENCRYPTION:
1854                 status = "Volume pending migration to encrypted state";
1855                 break;
1856         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1857                 status = "Volume pending encryption rekeying";
1858                 break;
1859         case CISS_LV_NOT_SUPPORTED:
1860                 status = "Volume not supported on this controller";
1861                 break;
1862         case CISS_LV_STATUS_UNAVAILABLE:
1863                 status = "Volume status not available";
1864                 break;
1865         default:
1866                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1867                         unknown_state_str, device->volume_status);
1868                 status = unknown_state_buffer;
1869                 break;
1870         }
1871
1872         dev_info(&ctrl_info->pci_dev->dev,
1873                 "scsi %d:%d:%d:%d %s\n",
1874                 ctrl_info->scsi_host->host_no,
1875                 device->bus, device->target, device->lun, status);
1876 }
1877
1878 static void pqi_rescan_worker(struct work_struct *work)
1879 {
1880         struct pqi_ctrl_info *ctrl_info;
1881
1882         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1883                 rescan_work);
1884
1885         pqi_scan_scsi_devices(ctrl_info);
1886 }
1887
1888 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1889         struct pqi_scsi_dev *device)
1890 {
1891         int rc;
1892
1893         if (pqi_is_logical_device(device))
1894                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1895                         device->target, device->lun);
1896         else
1897                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1898
1899         return rc;
1900 }
1901
1902 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS      (20 * 1000)
1903
1904 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1905 {
1906         int rc;
1907         int lun;
1908
1909         for (lun = 0; lun < device->lun_count; lun++) {
1910                 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1911                         PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1912                 if (rc)
1913                         dev_err(&ctrl_info->pci_dev->dev,
1914                                 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1915                                 ctrl_info->scsi_host->host_no, device->bus,
1916                                 device->target, lun,
1917                                 atomic_read(&device->scsi_cmds_outstanding[lun]));
1918         }
1919
1920         if (pqi_is_logical_device(device))
1921                 scsi_remove_device(device->sdev);
1922         else
1923                 pqi_remove_sas_device(device);
1924
1925         pqi_device_remove_start(device);
1926 }
1927
1928 /* Assumes the SCSI device list lock is held. */
1929
1930 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1931         int bus, int target, int lun)
1932 {
1933         struct pqi_scsi_dev *device;
1934
1935         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1936                 if (device->bus == bus && device->target == target && device->lun == lun)
1937                         return device;
1938
1939         return NULL;
1940 }
1941
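/* Physical devices are matched by WWID; logical devices are matched by volume ID. */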
1942 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1943 {
1944         if (dev1->is_physical_device != dev2->is_physical_device)
1945                 return false;
1946
1947         if (dev1->is_physical_device)
1948                 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1949
1950         return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1951 }
1952
1953 enum pqi_find_result {
1954         DEVICE_NOT_FOUND,
1955         DEVICE_CHANGED,
1956         DEVICE_SAME,
1957 };
1958
1959 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1960         struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1961 {
1962         struct pqi_scsi_dev *device;
1963
1964         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1965                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1966                         *matching_device = device;
1967                         if (pqi_device_equal(device_to_find, device)) {
1968                                 if (device_to_find->volume_offline)
1969                                         return DEVICE_CHANGED;
1970                                 return DEVICE_SAME;
1971                         }
1972                         return DEVICE_CHANGED;
1973                 }
1974         }
1975
1976         return DEVICE_NOT_FOUND;
1977 }
1978
1979 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1980 {
1981         if (device->is_expander_smp_device)
1982                 return "Enclosure SMP    ";
1983
1984         return scsi_device_type(device->devtype);
1985 }
1986
1987 #define PQI_DEV_INFO_BUFFER_LENGTH      128
1988
1989 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1990         char *action, struct pqi_scsi_dev *device)
1991 {
1992         ssize_t count;
1993         char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1994
1995         count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
1996                 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
1997
1998         if (device->target_lun_valid)
1999                 count += scnprintf(buffer + count,
2000                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2001                         "%d:%d",
2002                         device->target,
2003                         device->lun);
2004         else
2005                 count += scnprintf(buffer + count,
2006                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2007                         "-:-");
2008
2009         if (pqi_is_logical_device(device))
2010                 count += scnprintf(buffer + count,
2011                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2012                         " %08x%08x",
2013                         *((u32 *)&device->scsi3addr),
2014                         *((u32 *)&device->scsi3addr[4]));
2015         else
2016                 count += scnprintf(buffer + count,
2017                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2018                         " %016llx%016llx",
2019                         get_unaligned_be64(&device->wwid[0]),
2020                         get_unaligned_be64(&device->wwid[8]));
2021
2022         count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2023                 " %s %.8s %.16s ",
2024                 pqi_device_type(device),
2025                 device->vendor,
2026                 device->model);
2027
2028         if (pqi_is_logical_device(device)) {
2029                 if (device->devtype == TYPE_DISK)
2030                         count += scnprintf(buffer + count,
2031                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2032                                 "SSDSmartPathCap%c En%c %-12s",
2033                                 device->raid_bypass_configured ? '+' : '-',
2034                                 device->raid_bypass_enabled ? '+' : '-',
2035                                 pqi_raid_level_to_string(device->raid_level));
2036         } else {
2037                 count += scnprintf(buffer + count,
2038                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2039                         "AIO%c", device->aio_enabled ? '+' : '-');
2040                 if (device->devtype == TYPE_DISK ||
2041                         device->devtype == TYPE_ZBC)
2042                         count += scnprintf(buffer + count,
2043                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2044                                 " qd=%-6d", device->queue_depth);
2045         }
2046
2047         dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2048 }
2049
2050 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2051 {
2052         u32 raid_map1_size;
2053         u32 raid_map2_size;
2054
2055         if (raid_map1 == NULL || raid_map2 == NULL)
2056                 return raid_map1 == raid_map2;
2057
2058         raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2059         raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2060
2061         if (raid_map1_size != raid_map2_size)
2062                 return false;
2063
2064         return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2065 }
2066
2067 /* Assumes the SCSI device list lock is held. */
2068
2069 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2070         struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2071 {
2072         existing_device->device_type = new_device->device_type;
2073         existing_device->bus = new_device->bus;
2074         if (new_device->target_lun_valid) {
2075                 existing_device->target = new_device->target;
2076                 existing_device->lun = new_device->lun;
2077                 existing_device->target_lun_valid = true;
2078         }
2079
2080         /* By definition, the scsi3addr and wwid fields are already the same. */
2081
2082         existing_device->is_physical_device = new_device->is_physical_device;
2083         memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2084         memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2085         existing_device->sas_address = new_device->sas_address;
2086         existing_device->queue_depth = new_device->queue_depth;
2087         existing_device->device_offline = false;
2088         existing_device->lun_count = new_device->lun_count;
2089
2090         if (pqi_is_logical_device(existing_device)) {
2091                 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2092
2093                 if (existing_device->devtype == TYPE_DISK) {
2094                         existing_device->raid_level = new_device->raid_level;
2095                         existing_device->volume_status = new_device->volume_status;
2096                         if (ctrl_info->logical_volume_rescan_needed)
2097                                 existing_device->rescan = true;
2098                         memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2099                         if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2100                                 kfree(existing_device->raid_map);
2101                                 existing_device->raid_map = new_device->raid_map;
2102                                 /* To prevent this from being freed later. */
2103                                 new_device->raid_map = NULL;
2104                         }
2105                         existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2106                         existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2107                 }
2108         } else {
2109                 existing_device->aio_enabled = new_device->aio_enabled;
2110                 existing_device->aio_handle = new_device->aio_handle;
2111                 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2112                 existing_device->active_path_index = new_device->active_path_index;
2113                 existing_device->phy_id = new_device->phy_id;
2114                 existing_device->path_map = new_device->path_map;
2115                 existing_device->bay = new_device->bay;
2116                 existing_device->box_index = new_device->box_index;
2117                 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2118                 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2119                 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2120                 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2121         }
2122 }
2123
2124 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2125 {
2126         if (device) {
2127                 kfree(device->raid_map);
2128                 kfree(device);
2129         }
2130 }
2131
2132 /*
2133  * Called when exposing a new device to the OS fails in order to re-adjust
2134  * our internal SCSI device list to match the SCSI ML's view.
2135  */
2136
2137 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2138         struct pqi_scsi_dev *device)
2139 {
2140         unsigned long flags;
2141
2142         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2143         list_del(&device->scsi_device_list_entry);
2144         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2145
2146         /* Allow the device structure to be freed later. */
2147         device->keep_device = false;
2148 }
2149
2150 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2151 {
2152         if (device->is_expander_smp_device)
2153                 return device->sas_port != NULL;
2154
2155         return device->sdev != NULL;
2156 }
2157
2158 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
2159 {
2160         unsigned int lun;
2161         struct pqi_tmf_work *tmf_work;
2162
2163         for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
2164                 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
2165 }
2166
2167 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2168         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2169 {
2170         int rc;
2171         unsigned int i;
2172         unsigned long flags;
2173         enum pqi_find_result find_result;
2174         struct pqi_scsi_dev *device;
2175         struct pqi_scsi_dev *next;
2176         struct pqi_scsi_dev *matching_device;
2177         LIST_HEAD(add_list);
2178         LIST_HEAD(delete_list);
2179
2180         /*
2181          * The idea here is to do as little work as possible while holding the
2182          * spinlock.  That's why we go to great pains to defer anything other
2183          * than updating the internal device list until after we release the
2184          * spinlock.
2185          */
2186
2187         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2188
2189         /* Assume that all devices in the existing list have gone away. */
2190         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2191                 device->device_gone = true;
2192
2193         for (i = 0; i < num_new_devices; i++) {
2194                 device = new_device_list[i];
2195
2196                 find_result = pqi_scsi_find_entry(ctrl_info, device,
2197                         &matching_device);
2198
2199                 switch (find_result) {
2200                 case DEVICE_SAME:
2201                         /*
2202                          * The newly found device is already in the existing
2203                          * device list.
2204                          */
2205                         device->new_device = false;
2206                         matching_device->device_gone = false;
2207                         pqi_scsi_update_device(ctrl_info, matching_device, device);
2208                         break;
2209                 case DEVICE_NOT_FOUND:
2210                         /*
2211                          * The newly found device is NOT in the existing device
2212                          * list.
2213                          */
2214                         device->new_device = true;
2215                         break;
2216                 case DEVICE_CHANGED:
2217                         /*
2218                          * The original device has gone away and we need to add
2219                          * the new device.
2220                          */
2221                         device->new_device = true;
2222                         break;
2223                 }
2224         }
2225
2226         /* Process all devices that have gone away. */
2227         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2228                 scsi_device_list_entry) {
2229                 if (device->device_gone) {
2230                         list_del(&device->scsi_device_list_entry);
2231                         list_add_tail(&device->delete_list_entry, &delete_list);
2232                 }
2233         }
2234
2235         /* Process all new devices. */
2236         for (i = 0; i < num_new_devices; i++) {
2237                 device = new_device_list[i];
2238                 if (!device->new_device)
2239                         continue;
2240                 if (device->volume_offline)
2241                         continue;
2242                 list_add_tail(&device->scsi_device_list_entry,
2243                         &ctrl_info->scsi_device_list);
2244                 list_add_tail(&device->add_list_entry, &add_list);
2245                 /* To prevent this device structure from being freed later. */
2246                 device->keep_device = true;
2247                 pqi_init_device_tmf_work(device);
2248         }
2249
2250         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2251
2252         /*
2253          * If OFA is in progress and there are devices that need to be deleted,
2254          * allow any pending reset operations to continue and unblock any SCSI
2255          * requests before removal.
2256          */
2257         if (pqi_ofa_in_progress(ctrl_info)) {
2258                 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2259                         if (pqi_is_device_added(device))
2260                                 pqi_device_remove_start(device);
2261                 pqi_ctrl_unblock_device_reset(ctrl_info);
2262                 pqi_scsi_unblock_requests(ctrl_info);
2263         }
2264
2265         /* Remove all devices that have gone away. */
2266         list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2267                 if (device->volume_offline) {
2268                         pqi_dev_info(ctrl_info, "offline", device);
2269                         pqi_show_volume_status(ctrl_info, device);
2270                 } else {
2271                         pqi_dev_info(ctrl_info, "removed", device);
2272                 }
2273                 if (pqi_is_device_added(device))
2274                         pqi_remove_device(ctrl_info, device);
2275                 list_del(&device->delete_list_entry);
2276                 pqi_free_device(device);
2277         }
2278
2279         /*
2280          * Notify the SML of any existing device changes, such as
2281          * queue depth and device size.
2282          */
2283         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2284                 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2285                         device->advertised_queue_depth = device->queue_depth;
2286                         scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2287                         if (device->rescan) {
2288                                 scsi_rescan_device(device->sdev);
2289                                 device->rescan = false;
2290                         }
2291                 }
2292         }
2293
2294         /* Expose any new devices. */
2295         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2296                 if (!pqi_is_device_added(device)) {
2297                         rc = pqi_add_device(ctrl_info, device);
2298                         if (rc == 0) {
2299                                 pqi_dev_info(ctrl_info, "added", device);
2300                         } else {
2301                                 dev_warn(&ctrl_info->pci_dev->dev,
2302                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
2303                                         ctrl_info->scsi_host->host_no,
2304                                         device->bus, device->target,
2305                                         device->lun);
2306                                 pqi_fixup_botched_add(ctrl_info, device);
2307                         }
2308                 }
2309         }
2310
2311         ctrl_info->logical_volume_rescan_needed = false;
2312
2313 }
2314
2315 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2316 {
2317         /*
2318          * Only support the HBA controller itself as a RAID
2319          * controller.  If it's a RAID controller other than
2320          * the HBA itself (an external RAID controller, for
2321          * example), we don't support it.
2322          */
2323         if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2324                 !pqi_is_hba_lunid(device->scsi3addr))
2325                         return false;
2326
2327         return true;
2328 }
2329
2330 static inline bool pqi_skip_device(u8 *scsi3addr)
2331 {
2332         /* Ignore all masked devices. */
2333         if (MASKED_DEVICE(scsi3addr))
2334                 return true;
2335
2336         return false;
2337 }
2338
2339 static inline void pqi_mask_device(u8 *scsi3addr)
2340 {
2341         scsi3addr[3] |= 0xc0;
2342 }
2343
2344 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
2345 {
2346         if (pqi_is_logical_device(device))
2347                 return false;
2348
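        /* More than one bit set in path_map means the device is reachable through multiple paths. */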
2349         return (device->path_map & (device->path_map - 1)) != 0;
2350 }
2351
2352 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2353 {
2354         return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2355 }
2356
2357 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2358 {
2359         int i;
2360         int rc;
2361         LIST_HEAD(new_device_list_head);
2362         struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2363         struct report_log_lun_list *logdev_list = NULL;
2364         struct report_phys_lun_16byte_wwid *phys_lun;
2365         struct report_log_lun *log_lun;
2366         struct bmic_identify_physical_device *id_phys = NULL;
2367         u32 num_physicals;
2368         u32 num_logicals;
2369         struct pqi_scsi_dev **new_device_list = NULL;
2370         struct pqi_scsi_dev *device;
2371         struct pqi_scsi_dev *next;
2372         unsigned int num_new_devices;
2373         unsigned int num_valid_devices;
2374         bool is_physical_device;
2375         u8 *scsi3addr;
2376         unsigned int physical_index;
2377         unsigned int logical_index;
2378         static char *out_of_memory_msg =
2379                 "failed to allocate memory, device discovery stopped";
2380
2381         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2382         if (rc)
2383                 goto out;
2384
2385         if (physdev_list)
2386                 num_physicals =
2387                         get_unaligned_be32(&physdev_list->header.list_length)
2388                                 / sizeof(physdev_list->lun_entries[0]);
2389         else
2390                 num_physicals = 0;
2391
2392         if (logdev_list)
2393                 num_logicals =
2394                         get_unaligned_be32(&logdev_list->header.list_length)
2395                                 / sizeof(logdev_list->lun_entries[0]);
2396         else
2397                 num_logicals = 0;
2398
2399         if (num_physicals) {
2400                 /*
2401                  * We need this buffer for calls to pqi_get_physical_device_info()
2402                  * below.  We allocate it here instead of inside
2403                  * pqi_get_physical_device_info() because it's a fairly large
2404                  * buffer.
2405                  */
2406                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2407                 if (!id_phys) {
2408                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2409                                 out_of_memory_msg);
2410                         rc = -ENOMEM;
2411                         goto out;
2412                 }
2413
2414                 if (pqi_hide_vsep) {
2415                         for (i = num_physicals - 1; i >= 0; i--) {
2416                                 phys_lun = &physdev_list->lun_entries[i];
2417                                 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2418                                         pqi_mask_device(phys_lun->lunid);
2419                                         break;
2420                                 }
2421                         }
2422                 }
2423         }
2424
2425         if (num_logicals &&
2426                 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2427                 ctrl_info->lv_drive_type_mix_valid = true;
2428
2429         num_new_devices = num_physicals + num_logicals;
2430
2431         new_device_list = kmalloc_array(num_new_devices,
2432                                         sizeof(*new_device_list),
2433                                         GFP_KERNEL);
2434         if (!new_device_list) {
2435                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2436                 rc = -ENOMEM;
2437                 goto out;
2438         }
2439
2440         for (i = 0; i < num_new_devices; i++) {
2441                 device = kzalloc(sizeof(*device), GFP_KERNEL);
2442                 if (!device) {
2443                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2444                                 out_of_memory_msg);
2445                         rc = -ENOMEM;
2446                         goto out;
2447                 }
2448                 list_add_tail(&device->new_device_list_entry,
2449                         &new_device_list_head);
2450         }
2451
2452         device = NULL;
2453         num_valid_devices = 0;
2454         physical_index = 0;
2455         logical_index = 0;
2456
2457         for (i = 0; i < num_new_devices; i++) {
2458
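                /*
                 * By default physical devices are walked (and exposed) first,
                 * followed by logical devices; pqi_expose_ld_first reverses
                 * that order.
                 */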
2459                 if ((!pqi_expose_ld_first && i < num_physicals) ||
2460                         (pqi_expose_ld_first && i >= num_logicals)) {
2461                         is_physical_device = true;
2462                         phys_lun = &physdev_list->lun_entries[physical_index++];
2463                         log_lun = NULL;
2464                         scsi3addr = phys_lun->lunid;
2465                 } else {
2466                         is_physical_device = false;
2467                         phys_lun = NULL;
2468                         log_lun = &logdev_list->lun_entries[logical_index++];
2469                         scsi3addr = log_lun->lunid;
2470                 }
2471
2472                 if (is_physical_device && pqi_skip_device(scsi3addr))
2473                         continue;
2474
2475                 if (device)
2476                         device = list_next_entry(device, new_device_list_entry);
2477                 else
2478                         device = list_first_entry(&new_device_list_head,
2479                                 struct pqi_scsi_dev, new_device_list_entry);
2480
2481                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2482                 device->is_physical_device = is_physical_device;
2483                 if (is_physical_device) {
2484                         device->device_type = phys_lun->device_type;
2485                         if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2486                                 device->is_expander_smp_device = true;
2487                 } else {
2488                         device->is_external_raid_device =
2489                                 pqi_is_external_raid_addr(scsi3addr);
2490                 }
2491
2492                 if (!pqi_is_supported_device(device))
2493                         continue;
2494
2495                 /* Gather information about the device. */
2496                 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2497                 if (rc == -ENOMEM) {
2498                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2499                                 out_of_memory_msg);
2500                         goto out;
2501                 }
2502                 if (rc) {
2503                         if (device->is_physical_device)
2504                                 dev_warn(&ctrl_info->pci_dev->dev,
2505                                         "obtaining device info failed, skipping physical device %016llx%016llx\n",
2506                                         get_unaligned_be64(&phys_lun->wwid[0]),
2507                                         get_unaligned_be64(&phys_lun->wwid[8]));
2508                         else
2509                                 dev_warn(&ctrl_info->pci_dev->dev,
2510                                         "obtaining device info failed, skipping logical device %08x%08x\n",
2511                                         *((u32 *)&device->scsi3addr),
2512                                         *((u32 *)&device->scsi3addr[4]));
2513                         rc = 0;
2514                         continue;
2515                 }
2516
2517                 /* Do not present disks that the OS cannot fully probe. */
2518                 if (pqi_keep_device_offline(device))
2519                         continue;
2520
2521                 pqi_assign_bus_target_lun(device);
2522
2523                 if (device->is_physical_device) {
2524                         memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2525                         if ((phys_lun->device_flags &
2526                                 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2527                                 phys_lun->aio_handle) {
2528                                         device->aio_enabled = true;
2529                                         device->aio_handle =
2530                                                 phys_lun->aio_handle;
2531                         }
2532                 } else {
2533                         memcpy(device->volume_id, log_lun->volume_id,
2534                                 sizeof(device->volume_id));
2535                 }
2536
2537                 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2538
2539                 new_device_list[num_valid_devices++] = device;
2540         }
2541
2542         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2543
2544 out:
2545         list_for_each_entry_safe(device, next, &new_device_list_head,
2546                 new_device_list_entry) {
2547                 if (device->keep_device)
2548                         continue;
2549                 list_del(&device->new_device_list_entry);
2550                 pqi_free_device(device);
2551         }
2552
2553         kfree(new_device_list);
2554         kfree(physdev_list);
2555         kfree(logdev_list);
2556         kfree(id_phys);
2557
2558         return rc;
2559 }
2560
2561 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2562 {
2563         int rc;
2564         int mutex_acquired;
2565
2566         if (pqi_ctrl_offline(ctrl_info))
2567                 return -ENXIO;
2568
2569         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2570
2571         if (!mutex_acquired) {
2572                 if (pqi_ctrl_scan_blocked(ctrl_info))
2573                         return -EBUSY;
2574                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2575                 return -EINPROGRESS;
2576         }
2577
2578         rc = pqi_update_scsi_devices(ctrl_info);
2579         if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2580                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2581
2582         mutex_unlock(&ctrl_info->scan_mutex);
2583
2584         return rc;
2585 }
2586
2587 static void pqi_scan_start(struct Scsi_Host *shost)
2588 {
2589         struct pqi_ctrl_info *ctrl_info;
2590
2591         ctrl_info = shost_to_hba(shost);
2592
2593         pqi_scan_scsi_devices(ctrl_info);
2594 }
2595
2596 /* Returns TRUE if scan is finished. */
2597
2598 static int pqi_scan_finished(struct Scsi_Host *shost,
2599         unsigned long elapsed_time)
2600 {
2601         struct pqi_ctrl_info *ctrl_info;
2602
2603         ctrl_info = shost_priv(shost);
2604
2605         return !mutex_is_locked(&ctrl_info->scan_mutex);
2606 }
2607
2608 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2609         struct raid_map *raid_map, u64 first_block)
2610 {
2611         u32 volume_blk_size;
2612
2613         /*
2614          * Set the encryption tweak values based on logical block address.
2615          * If the block size is 512, the tweak value is equal to the LBA.
2616          * For other block sizes, tweak value is (LBA * block size) / 512.
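         * For example, with a 4096-byte block size the tweak value is LBA * 8.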
2617          */
2618         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2619         if (volume_blk_size != 512)
2620                 first_block = (first_block * volume_blk_size) / 512;
2621
2622         encryption_info->data_encryption_key_index =
2623                 get_unaligned_le16(&raid_map->data_encryption_key_index);
2624         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2625         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2626 }
2627
2628 /*
2629  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2630  */
2631
2632 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2633         struct pqi_scsi_dev_raid_map_data *rmd)
2634 {
2635         bool is_supported = true;
2636
2637         switch (rmd->raid_level) {
2638         case SA_RAID_0:
2639                 break;
2640         case SA_RAID_1:
2641                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2642                         rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2643                         is_supported = false;
2644                 break;
2645         case SA_RAID_TRIPLE:
2646                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2647                         rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2648                         is_supported = false;
2649                 break;
2650         case SA_RAID_5:
2651                 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2652                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2653                         is_supported = false;
2654                 break;
2655         case SA_RAID_6:
2656                 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2657                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2658                         is_supported = false;
2659                 break;
2660         default:
2661                 is_supported = false;
2662                 break;
2663         }
2664
2665         return is_supported;
2666 }
2667
2668 #define PQI_RAID_BYPASS_INELIGIBLE      1
2669
2670 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2671         struct pqi_scsi_dev_raid_map_data *rmd)
2672 {
2673         /* Check for valid opcode, get LBA and block count. */
2674         switch (scmd->cmnd[0]) {
2675         case WRITE_6:
2676                 rmd->is_write = true;
2677                 fallthrough;
2678         case READ_6:
2679                 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2680                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2681                 rmd->block_cnt = (u32)scmd->cmnd[4];
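                /* Per the SCSI spec, a READ(6)/WRITE(6) transfer length of 0 means 256 blocks. */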
2682                 if (rmd->block_cnt == 0)
2683                         rmd->block_cnt = 256;
2684                 break;
2685         case WRITE_10:
2686                 rmd->is_write = true;
2687                 fallthrough;
2688         case READ_10:
2689                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2690                 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2691                 break;
2692         case WRITE_12:
2693                 rmd->is_write = true;
2694                 fallthrough;
2695         case READ_12:
2696                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2697                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2698                 break;
2699         case WRITE_16:
2700                 rmd->is_write = true;
2701                 fallthrough;
2702         case READ_16:
2703                 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2704                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2705                 break;
2706         default:
2707                 /* Process via normal I/O path. */
2708                 return PQI_RAID_BYPASS_INELIGIBLE;
2709         }
2710
2711         put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2712
2713         return 0;
2714 }
2715
2716 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2717         struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2718 {
2719 #if BITS_PER_LONG == 32
2720         u64 tmpdiv;
2721 #endif
2722
2723         rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2724
2725         /* Check for invalid block or wraparound. */
2726         if (rmd->last_block >=
2727                 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2728                 rmd->last_block < rmd->first_block)
2729                 return PQI_RAID_BYPASS_INELIGIBLE;
2730
2731         rmd->data_disks_per_row =
2732                 get_unaligned_le16(&raid_map->data_disks_per_row);
2733         rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2734         rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2735
2736         /* Calculate stripe information for the request. */
2737         rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2738         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2739                 return PQI_RAID_BYPASS_INELIGIBLE;
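        /*
         * Illustrative example (values chosen for clarity): with
         * strip_size = 128 and data_disks_per_row = 4, blocks_per_row = 512,
         * so an I/O starting at LBA 1000 falls in row 1 (1000 / 512), at row
         * offset 488, column 3 (488 / 128).
         */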
2740 #if BITS_PER_LONG == 32
2741         tmpdiv = rmd->first_block;
2742         do_div(tmpdiv, rmd->blocks_per_row);
2743         rmd->first_row = tmpdiv;
2744         tmpdiv = rmd->last_block;
2745         do_div(tmpdiv, rmd->blocks_per_row);
2746         rmd->last_row = tmpdiv;
2747         rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2748         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2749         tmpdiv = rmd->first_row_offset;
2750         do_div(tmpdiv, rmd->strip_size);
2751         rmd->first_column = tmpdiv;
2752         tmpdiv = rmd->last_row_offset;
2753         do_div(tmpdiv, rmd->strip_size);
2754         rmd->last_column = tmpdiv;
2755 #else
2756         rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2757         rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2758         rmd->first_row_offset = (u32)(rmd->first_block -
2759                 (rmd->first_row * rmd->blocks_per_row));
2760         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2761                 rmd->blocks_per_row));
2762         rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2763         rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2764 #endif
2765
2766         /* If this isn't a single row/column, let the controller handle the request. */
2767         if (rmd->first_row != rmd->last_row ||
2768                 rmd->first_column != rmd->last_column)
2769                 return PQI_RAID_BYPASS_INELIGIBLE;
2770
2771         /* Proceeding with driver mapping. */
2772         rmd->total_disks_per_row = rmd->data_disks_per_row +
2773                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2774         rmd->map_row = ((u32)(rmd->first_row >>
2775                 raid_map->parity_rotation_shift)) %
2776                 get_unaligned_le16(&raid_map->row_cnt);
2777         rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2778                 rmd->first_column;
2779
2780         return 0;
2781 }
2782
2783 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2784         struct raid_map *raid_map)
2785 {
2786 #if BITS_PER_LONG == 32
2787         u64 tmpdiv;
2788 #endif
2789
2790         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2791                 return PQI_RAID_BYPASS_INELIGIBLE;
2792
2793         /* RAID 50/60 */
2794         /* Verify first and last block are in same RAID group. */
2795         rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
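        /*
         * Illustrative example: with blocks_per_row = 512 and
         * layout_map_count = 2 (e.g. RAID 50 striped over two RAID-5 groups),
         * stripesize = 1024, so LBA 1500 lands in group 0
         * ((1500 % 1024) / 512 = 0).
         */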
2796 #if BITS_PER_LONG == 32
2797         tmpdiv = rmd->first_block;
2798         rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2799         tmpdiv = rmd->first_group;
2800         do_div(tmpdiv, rmd->blocks_per_row);
2801         rmd->first_group = tmpdiv;
2802         tmpdiv = rmd->last_block;
2803         rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2804         tmpdiv = rmd->last_group;
2805         do_div(tmpdiv, rmd->blocks_per_row);
2806         rmd->last_group = tmpdiv;
2807 #else
2808         rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2809         rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2810 #endif
2811         if (rmd->first_group != rmd->last_group)
2812                 return PQI_RAID_BYPASS_INELIGIBLE;
2813
2814         /* Verify request is in a single row of RAID 5/6. */
2815 #if BITS_PER_LONG == 32
2816         tmpdiv = rmd->first_block;
2817         do_div(tmpdiv, rmd->stripesize);
2818         rmd->first_row = tmpdiv;
2819         rmd->r5or6_first_row = tmpdiv;
2820         tmpdiv = rmd->last_block;
2821         do_div(tmpdiv, rmd->stripesize);
2822         rmd->r5or6_last_row = tmpdiv;
2823 #else
2824         rmd->first_row = rmd->r5or6_first_row =
2825                 rmd->first_block / rmd->stripesize;
2826         rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2827 #endif
2828         if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2829                 return PQI_RAID_BYPASS_INELIGIBLE;
2830
2831         /* Verify request is in a single column. */
2832 #if BITS_PER_LONG == 32
2833         tmpdiv = rmd->first_block;
2834         rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2835         tmpdiv = rmd->first_row_offset;
2836         rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2837         rmd->r5or6_first_row_offset = rmd->first_row_offset;
2838         tmpdiv = rmd->last_block;
2839         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2840         tmpdiv = rmd->r5or6_last_row_offset;
2841         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2842         tmpdiv = rmd->r5or6_first_row_offset;
2843         do_div(tmpdiv, rmd->strip_size);
2844         rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2845         tmpdiv = rmd->r5or6_last_row_offset;
2846         do_div(tmpdiv, rmd->strip_size);
2847         rmd->r5or6_last_column = tmpdiv;
2848 #else
2849         rmd->first_row_offset = rmd->r5or6_first_row_offset =
2850                 (u32)((rmd->first_block % rmd->stripesize) %
2851                 rmd->blocks_per_row);
2852
2853         rmd->r5or6_last_row_offset =
2854                 (u32)((rmd->last_block % rmd->stripesize) %
2855                 rmd->blocks_per_row);
2856
2857         rmd->first_column =
2858                 rmd->r5or6_first_row_offset / rmd->strip_size;
2859         rmd->r5or6_first_column = rmd->first_column;
2860         rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2861 #endif
2862         if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2863                 return PQI_RAID_BYPASS_INELIGIBLE;
2864
2865         /* Request is eligible. */
2866         rmd->map_row =
2867                 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2868                 get_unaligned_le16(&raid_map->row_cnt);
2869
2870         rmd->map_index = (rmd->first_group *
2871                 (get_unaligned_le16(&raid_map->row_cnt) *
2872                 rmd->total_disks_per_row)) +
2873                 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2874
2875         if (rmd->is_write) {
2876                 u32 index;
2877
2878                 /*
2879                  * p_parity_it_nexus and q_parity_it_nexus are the AIO (IT nexus)
2880                  * handles of the parity entries inside the device's raid_map.
2881                  *
2882                  * A device's RAID map is bounded by the number of RAID disks squared.
2883                  *
2884                  * The device's RAID map size is checked during device
2885                  * initialization.
2886                  */
2887                 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2888                 index *= rmd->total_disks_per_row;
2889                 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2890
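                /*
                 * Illustrative example (hypothetical geometry): with 3 data
                 * disks and 2 metadata disks per row (total_disks_per_row = 5),
                 * map_index 7 lies in the second row, so the calculation below
                 * yields DIV_ROUND_UP(8, 5) * 5 - 2 = 8, the row's P parity
                 * entry; for RAID 6, index + 1 is the Q parity entry.
                 */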
2891                 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2892                 if (rmd->raid_level == SA_RAID_6) {
2893                         rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2894                         rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2895                 }
2896 #if BITS_PER_LONG == 32
2897                 tmpdiv = rmd->first_block;
2898                 do_div(tmpdiv, rmd->blocks_per_row);
2899                 rmd->row = tmpdiv;
2900 #else
2901                 rmd->row = rmd->first_block / rmd->blocks_per_row;
2902 #endif
2903         }
2904
2905         return 0;
2906 }
2907
2908 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2909 {
2910         /* Build the new CDB for the physical disk I/O. */
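        /*
         * Disk LBAs that do not fit in 32 bits require READ(16)/WRITE(16);
         * otherwise a 10-byte CDB with a 32-bit LBA and 16-bit transfer
         * length is sufficient.
         */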
2911         if (rmd->disk_block > 0xffffffff) {
2912                 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2913                 rmd->cdb[1] = 0;
2914                 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2915                 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2916                 rmd->cdb[14] = 0;
2917                 rmd->cdb[15] = 0;
2918                 rmd->cdb_length = 16;
2919         } else {
2920                 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2921                 rmd->cdb[1] = 0;
2922                 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2923                 rmd->cdb[6] = 0;
2924                 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2925                 rmd->cdb[9] = 0;
2926                 rmd->cdb_length = 10;
2927         }
2928 }
2929
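/*
 * For RAID 1/Triple writes, gather the AIO handle of the corresponding
 * member disk in each mirror group so the data is written to every copy.
 */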
2930 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2931         struct pqi_scsi_dev_raid_map_data *rmd)
2932 {
2933         u32 index;
2934         u32 group;
2935
2936         group = rmd->map_index / rmd->data_disks_per_row;
2937
2938         index = rmd->map_index - (group * rmd->data_disks_per_row);
2939         rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2940         index += rmd->data_disks_per_row;
2941         rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2942         if (rmd->layout_map_count > 2) {
2943                 index += rmd->data_disks_per_row;
2944                 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2945         }
2946
2947         rmd->num_it_nexus_entries = rmd->layout_map_count;
2948 }
2949
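/*
 * Attempt to issue a logical-volume read/write directly to the underlying
 * physical disk (RAID bypass/AIO). Returns PQI_RAID_BYPASS_INELIGIBLE when
 * the request must fall back to the normal RAID path instead.
 */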
2950 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2951         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2952         struct pqi_queue_group *queue_group)
2953 {
2954         int rc;
2955         struct raid_map *raid_map;
2956         u32 group;
2957         u32 next_bypass_group;
2958         struct pqi_encryption_info *encryption_info_ptr;
2959         struct pqi_encryption_info encryption_info;
2960         struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2961
2962         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2963         if (rc)
2964                 return PQI_RAID_BYPASS_INELIGIBLE;
2965
2966         rmd.raid_level = device->raid_level;
2967
2968         if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2969                 return PQI_RAID_BYPASS_INELIGIBLE;
2970
2971         if (unlikely(rmd.block_cnt == 0))
2972                 return PQI_RAID_BYPASS_INELIGIBLE;
2973
2974         raid_map = device->raid_map;
2975
2976         rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2977         if (rc)
2978                 return PQI_RAID_BYPASS_INELIGIBLE;
2979
2980         if (device->raid_level == SA_RAID_1 ||
2981                 device->raid_level == SA_RAID_TRIPLE) {
2982                 if (rmd.is_write) {
2983                         pqi_calc_aio_r1_nexus(raid_map, &rmd);
2984                 } else {
2985                         group = device->next_bypass_group[rmd.map_index];
2986                         next_bypass_group = group + 1;
2987                         if (next_bypass_group >= rmd.layout_map_count)
2988                                 next_bypass_group = 0;
2989                         device->next_bypass_group[rmd.map_index] = next_bypass_group;
2990                         rmd.map_index += group * rmd.data_disks_per_row;
2991                 }
2992         } else if ((device->raid_level == SA_RAID_5 ||
2993                 device->raid_level == SA_RAID_6) &&
2994                 (rmd.layout_map_count > 1 || rmd.is_write)) {
2995                 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
2996                 if (rc)
2997                         return PQI_RAID_BYPASS_INELIGIBLE;
2998         }
2999
3000         if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3001                 return PQI_RAID_BYPASS_INELIGIBLE;
3002
3003         rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3004         rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3005                 rmd.first_row * rmd.strip_size +
3006                 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3007         rmd.disk_block_cnt = rmd.block_cnt;
3008
3009         /* Handle differing logical/physical block sizes. */
3010         if (raid_map->phys_blk_shift) {
3011                 rmd.disk_block <<= raid_map->phys_blk_shift;
3012                 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3013         }
3014
3015         if (unlikely(rmd.disk_block_cnt > 0xffff))
3016                 return PQI_RAID_BYPASS_INELIGIBLE;
3017
3018         pqi_set_aio_cdb(&rmd);
3019
3020         if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3021                 if (rmd.data_length > device->max_transfer_encrypted)
3022                         return PQI_RAID_BYPASS_INELIGIBLE;
3023                 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3024                 encryption_info_ptr = &encryption_info;
3025         } else {
3026                 encryption_info_ptr = NULL;
3027         }
3028
3029         if (rmd.is_write) {
3030                 switch (device->raid_level) {
3031                 case SA_RAID_1:
3032                 case SA_RAID_TRIPLE:
3033                         return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3034                                 encryption_info_ptr, device, &rmd);
3035                 case SA_RAID_5:
3036                 case SA_RAID_6:
3037                         return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3038                                 encryption_info_ptr, device, &rmd);
3039                 }
3040         }
3041
3042         return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3043                 rmd.cdb, rmd.cdb_length, queue_group,
3044                 encryption_info_ptr, true, false);
3045 }
3046
3047 #define PQI_STATUS_IDLE         0x0
3048
3049 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
3050 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
3051
3052 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
3053 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
3054 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
3055 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
3056 #define PQI_DEVICE_STATE_ERROR                          0x4
3057
3058 #define PQI_MODE_READY_TIMEOUT_SECS             30
3059 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
3060
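/*
 * Poll the PQI registers until the controller reports PQI mode: first the
 * device signature, then an IDLE function/status code, and finally the
 * "all registers ready" device state, all against a single shared timeout.
 */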
3061 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3062 {
3063         struct pqi_device_registers __iomem *pqi_registers;
3064         unsigned long timeout;
3065         u64 signature;
3066         u8 status;
3067
3068         pqi_registers = ctrl_info->pqi_registers;
3069         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3070
3071         while (1) {
3072                 signature = readq(&pqi_registers->signature);
3073                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3074                         sizeof(signature)) == 0)
3075                         break;
3076                 if (time_after(jiffies, timeout)) {
3077                         dev_err(&ctrl_info->pci_dev->dev,
3078                                 "timed out waiting for PQI signature\n");
3079                         return -ETIMEDOUT;
3080                 }
3081                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3082         }
3083
3084         while (1) {
3085                 status = readb(&pqi_registers->function_and_status_code);
3086                 if (status == PQI_STATUS_IDLE)
3087                         break;
3088                 if (time_after(jiffies, timeout)) {
3089                         dev_err(&ctrl_info->pci_dev->dev,
3090                                 "timed out waiting for PQI IDLE\n");
3091                         return -ETIMEDOUT;
3092                 }
3093                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3094         }
3095
3096         while (1) {
3097                 if (readl(&pqi_registers->device_status) ==
3098                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3099                         break;
3100                 if (time_after(jiffies, timeout)) {
3101                         dev_err(&ctrl_info->pci_dev->dev,
3102                                 "timed out waiting for PQI all registers ready\n");
3103                         return -ETIMEDOUT;
3104                 }
3105                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3106         }
3107
3108         return 0;
3109 }
3110
3111 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3112 {
3113         struct pqi_scsi_dev *device;
3114
3115         device = io_request->scmd->device->hostdata;
3116         device->raid_bypass_enabled = false;
3117         device->aio_enabled = false;
3118 }
3119
3120 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3121 {
3122         struct pqi_ctrl_info *ctrl_info;
3123         struct pqi_scsi_dev *device;
3124
3125         device = sdev->hostdata;
3126         if (device->device_offline)
3127                 return;
3128
3129         device->device_offline = true;
3130         ctrl_info = shost_to_hba(sdev->host);
3131         pqi_schedule_rescan_worker(ctrl_info);
3132         dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3133                 path, ctrl_info->scsi_host->host_no, device->bus,
3134                 device->target, device->lun);
3135 }
3136
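/*
 * Translate RAID-path error information from the controller into SCSI
 * midlayer status: set the host byte and residual count, copy back any
 * sense data, and take the device offline on a "logical unit failure"
 * (HARDWARE ERROR, ASC 0x3e, ASCQ 0x01).
 */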
3137 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3138 {
3139         u8 scsi_status;
3140         u8 host_byte;
3141         struct scsi_cmnd *scmd;
3142         struct pqi_raid_error_info *error_info;
3143         size_t sense_data_length;
3144         int residual_count;
3145         int xfer_count;
3146         struct scsi_sense_hdr sshdr;
3147
3148         scmd = io_request->scmd;
3149         if (!scmd)
3150                 return;
3151
3152         error_info = io_request->error_info;
3153         scsi_status = error_info->status;
3154         host_byte = DID_OK;
3155
3156         switch (error_info->data_out_result) {
3157         case PQI_DATA_IN_OUT_GOOD:
3158                 break;
3159         case PQI_DATA_IN_OUT_UNDERFLOW:
3160                 xfer_count =
3161                         get_unaligned_le32(&error_info->data_out_transferred);
3162                 residual_count = scsi_bufflen(scmd) - xfer_count;
3163                 scsi_set_resid(scmd, residual_count);
3164                 if (xfer_count < scmd->underflow)
3165                         host_byte = DID_SOFT_ERROR;
3166                 break;
3167         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3168         case PQI_DATA_IN_OUT_ABORTED:
3169                 host_byte = DID_ABORT;
3170                 break;
3171         case PQI_DATA_IN_OUT_TIMEOUT:
3172                 host_byte = DID_TIME_OUT;
3173                 break;
3174         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3175         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3176         case PQI_DATA_IN_OUT_BUFFER_ERROR:
3177         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3178         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3179         case PQI_DATA_IN_OUT_ERROR:
3180         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3181         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3182         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3183         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3184         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3185         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3186         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3187         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3188         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3189         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3190         default:
3191                 host_byte = DID_ERROR;
3192                 break;
3193         }
3194
3195         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3196         if (sense_data_length == 0)
3197                 sense_data_length =
3198                         get_unaligned_le16(&error_info->response_data_length);
3199         if (sense_data_length) {
3200                 if (sense_data_length > sizeof(error_info->data))
3201                         sense_data_length = sizeof(error_info->data);
3202
3203                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3204                         scsi_normalize_sense(error_info->data,
3205                                 sense_data_length, &sshdr) &&
3206                                 sshdr.sense_key == HARDWARE_ERROR &&
3207                                 sshdr.asc == 0x3e) {
3208                         struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3209                         struct pqi_scsi_dev *device = scmd->device->hostdata;
3210
3211                         switch (sshdr.ascq) {
3212                         case 0x1: /* LOGICAL UNIT FAILURE */
3213                                 if (printk_ratelimit())
3214                                         scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3215                                                 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3216                                 pqi_take_device_offline(scmd->device, "RAID");
3217                                 host_byte = DID_NO_CONNECT;
3218                                 break;
3219
3220                         default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3221                                 if (printk_ratelimit())
3222                                         scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3223                                                 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3224                                 break;
3225                         }
3226                 }
3227
3228                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3229                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
3230                 memcpy(scmd->sense_buffer, error_info->data,
3231                         sense_data_length);
3232         }
3233
3234         scmd->result = scsi_status;
3235         set_host_byte(scmd, host_byte);
3236 }
3237
3238 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3239 {
3240         u8 scsi_status;
3241         u8 host_byte;
3242         struct scsi_cmnd *scmd;
3243         struct pqi_aio_error_info *error_info;
3244         size_t sense_data_length;
3245         int residual_count;
3246         int xfer_count;
3247         bool device_offline;
3248         struct pqi_scsi_dev *device;
3249
3250         scmd = io_request->scmd;
3251         error_info = io_request->error_info;
3252         host_byte = DID_OK;
3253         sense_data_length = 0;
3254         device_offline = false;
3255         device = scmd->device->hostdata;
3256
3257         switch (error_info->service_response) {
3258         case PQI_AIO_SERV_RESPONSE_COMPLETE:
3259                 scsi_status = error_info->status;
3260                 break;
3261         case PQI_AIO_SERV_RESPONSE_FAILURE:
3262                 switch (error_info->status) {
3263                 case PQI_AIO_STATUS_IO_ABORTED:
3264                         scsi_status = SAM_STAT_TASK_ABORTED;
3265                         break;
3266                 case PQI_AIO_STATUS_UNDERRUN:
3267                         scsi_status = SAM_STAT_GOOD;
3268                         residual_count = get_unaligned_le32(
3269                                                 &error_info->residual_count);
3270                         scsi_set_resid(scmd, residual_count);
3271                         xfer_count = scsi_bufflen(scmd) - residual_count;
3272                         if (xfer_count < scmd->underflow)
3273                                 host_byte = DID_SOFT_ERROR;
3274                         break;
3275                 case PQI_AIO_STATUS_OVERRUN:
3276                         scsi_status = SAM_STAT_GOOD;
3277                         break;
3278                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3279                         pqi_aio_path_disabled(io_request);
3280                         if (pqi_is_multipath_device(device)) {
3281                                 pqi_device_remove_start(device);
3282                                 host_byte = DID_NO_CONNECT;
3283                                 scsi_status = SAM_STAT_CHECK_CONDITION;
3284                         } else {
3285                                 scsi_status = SAM_STAT_GOOD;
3286                                 io_request->status = -EAGAIN;
3287                         }
3288                         break;
3289                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3290                 case PQI_AIO_STATUS_INVALID_DEVICE:
3291                         if (!io_request->raid_bypass) {
3292                                 device_offline = true;
3293                                 pqi_take_device_offline(scmd->device, "AIO");
3294                                 host_byte = DID_NO_CONNECT;
3295                         }
3296                         scsi_status = SAM_STAT_CHECK_CONDITION;
3297                         break;
3298                 case PQI_AIO_STATUS_IO_ERROR:
3299                 default:
3300                         scsi_status = SAM_STAT_CHECK_CONDITION;
3301                         break;
3302                 }
3303                 break;
3304         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3305         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3306                 scsi_status = SAM_STAT_GOOD;
3307                 break;
3308         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3309         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3310         default:
3311                 scsi_status = SAM_STAT_CHECK_CONDITION;
3312                 break;
3313         }
3314
3315         if (error_info->data_present) {
3316                 sense_data_length =
3317                         get_unaligned_le16(&error_info->data_length);
3318                 if (sense_data_length) {
3319                         if (sense_data_length > sizeof(error_info->data))
3320                                 sense_data_length = sizeof(error_info->data);
3321                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3322                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3323                         memcpy(scmd->sense_buffer, error_info->data,
3324                                 sense_data_length);
3325                 }
3326         }
3327
3328         if (device_offline && sense_data_length == 0)
3329                 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3330
3331         scmd->result = scsi_status;
3332         set_host_byte(scmd, host_byte);
3333 }
3334
3335 static void pqi_process_io_error(unsigned int iu_type,
3336         struct pqi_io_request *io_request)
3337 {
3338         switch (iu_type) {
3339         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3340                 pqi_process_raid_io_error(io_request);
3341                 break;
3342         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3343                 pqi_process_aio_io_error(io_request);
3344                 break;
3345         }
3346 }
3347
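/* Map SOP task management response codes onto errno values. */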
3348 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3349         struct pqi_task_management_response *response)
3350 {
3351         int rc;
3352
3353         switch (response->response_code) {
3354         case SOP_TMF_COMPLETE:
3355         case SOP_TMF_FUNCTION_SUCCEEDED:
3356                 rc = 0;
3357                 break;
3358         case SOP_TMF_REJECTED:
3359                 rc = -EAGAIN;
3360                 break;
3361         case SOP_TMF_INCORRECT_LOGICAL_UNIT:
3362                 rc = -ENODEV;
3363                 break;
3364         default:
3365                 rc = -EIO;
3366                 break;
3367         }
3368
3369         if (rc)
3370                 dev_err(&ctrl_info->pci_dev->dev,
3371                         "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3372
3373         return rc;
3374 }
3375
3376 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3377         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3378 {
3379         pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3380 }
3381
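/*
 * Drain completed I/O responses from one operational outbound queue:
 * validate the producer index and request ID, hand error responses to the
 * appropriate error handler, invoke each request's completion callback,
 * and finally advance the hardware consumer index.
 */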
3382 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3383 {
3384         int num_responses;
3385         pqi_index_t oq_pi;
3386         pqi_index_t oq_ci;
3387         struct pqi_io_request *io_request;
3388         struct pqi_io_response *response;
3389         u16 request_id;
3390
3391         num_responses = 0;
3392         oq_ci = queue_group->oq_ci_copy;
3393
3394         while (1) {
3395                 oq_pi = readl(queue_group->oq_pi);
3396                 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3397                         pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3398                         dev_err(&ctrl_info->pci_dev->dev,
3399                                 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3400                                 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3401                         return -1;
3402                 }
3403                 if (oq_pi == oq_ci)
3404                         break;
3405
3406                 num_responses++;
3407                 response = queue_group->oq_element_array +
3408                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3409
3410                 request_id = get_unaligned_le16(&response->request_id);
3411                 if (request_id >= ctrl_info->max_io_slots) {
3412                         pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3413                         dev_err(&ctrl_info->pci_dev->dev,
3414                                 "request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3415                                 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3416                         return -1;
3417                 }
3418
3419                 io_request = &ctrl_info->io_request_pool[request_id];
3420                 if (atomic_read(&io_request->refcount) == 0) {
3421                         pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3422                         dev_err(&ctrl_info->pci_dev->dev,
3423                                 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3424                                 request_id, oq_pi, oq_ci);
3425                         return -1;
3426                 }
3427
3428                 switch (response->header.iu_type) {
3429                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3430                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3431                         if (io_request->scmd)
3432                                 io_request->scmd->result = 0;
3433                         fallthrough;
3434                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3435                         break;
3436                 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3437                         io_request->status =
3438                                 get_unaligned_le16(
3439                                 &((struct pqi_vendor_general_response *)response)->status);
3440                         break;
3441                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3442                         io_request->status = pqi_interpret_task_management_response(ctrl_info,
3443                                 (void *)response);
3444                         break;
3445                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3446                         pqi_aio_path_disabled(io_request);
3447                         io_request->status = -EAGAIN;
3448                         break;
3449                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3450                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3451                         io_request->error_info = ctrl_info->error_buffer +
3452                                 (get_unaligned_le16(&response->error_index) *
3453                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3454                         pqi_process_io_error(response->header.iu_type, io_request);
3455                         break;
3456                 default:
3457                         pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3458                         dev_err(&ctrl_info->pci_dev->dev,
3459                                 "unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3460                                 response->header.iu_type, oq_pi, oq_ci);
3461                         return -1;
3462                 }
3463
3464                 io_request->io_complete_callback(io_request, io_request->context);
3465
3466                 /*
3467                  * Note that the I/O request structure CANNOT BE TOUCHED after
3468                  * returning from the I/O completion callback!
3469                  */
3470                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3471         }
3472
3473         if (num_responses) {
3474                 queue_group->oq_ci_copy = oq_ci;
3475                 writel(oq_ci, queue_group->oq_ci);
3476         }
3477
3478         return num_responses;
3479 }
3480
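/*
 * Number of free elements in a circular queue. One element is always left
 * unused so that a full queue can be distinguished from an empty one.
 * Illustrative example (hypothetical values): with 8 elements, pi = 2 and
 * ci = 5, then 8 - 5 + 2 = 5 elements are in use and 8 - 5 - 1 = 2 are free.
 */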
3481 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3482         unsigned int ci, unsigned int elements_in_queue)
3483 {
3484         unsigned int num_elements_used;
3485
3486         if (pi >= ci)
3487                 num_elements_used = pi - ci;
3488         else
3489                 num_elements_used = elements_in_queue - ci + pi;
3490
3491         return elements_in_queue - num_elements_used - 1;
3492 }
3493
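/*
 * Post an event acknowledgement IU on the RAID path of the default queue
 * group, spinning until an inbound queue slot frees up or the controller
 * goes offline.
 */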
3494 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3495         struct pqi_event_acknowledge_request *iu, size_t iu_length)
3496 {
3497         pqi_index_t iq_pi;
3498         pqi_index_t iq_ci;
3499         unsigned long flags;
3500         void *next_element;
3501         struct pqi_queue_group *queue_group;
3502
3503         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3504         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3505
3506         while (1) {
3507                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3508
3509                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3510                 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3511
3512                 if (pqi_num_elements_free(iq_pi, iq_ci,
3513                         ctrl_info->num_elements_per_iq))
3514                         break;
3515
3516                 spin_unlock_irqrestore(
3517                         &queue_group->submit_lock[RAID_PATH], flags);
3518
3519                 if (pqi_ctrl_offline(ctrl_info))
3520                         return;
3521         }
3522
3523         next_element = queue_group->iq_element_array[RAID_PATH] +
3524                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3525
3526         memcpy(next_element, iu, iu_length);
3527
3528         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3529         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3530
3531         /*
3532          * This write notifies the controller that an IU is available to be
3533          * processed.
3534          */
3535         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3536
3537         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3538 }
3539
3540 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3541         struct pqi_event *event)
3542 {
3543         struct pqi_event_acknowledge_request request;
3544
3545         memset(&request, 0, sizeof(request));
3546
3547         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3548         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3549                 &request.header.iu_length);
3550         request.event_type = event->event_type;
3551         put_unaligned_le16(event->event_id, &request.event_id);
3552         put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3553
3554         pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3555 }
3556
3557 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS              30
3558 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS        1
3559
3560 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3561         struct pqi_ctrl_info *ctrl_info)
3562 {
3563         u8 status;
3564         unsigned long timeout;
3565
3566         timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3567
3568         while (1) {
3569                 status = pqi_read_soft_reset_status(ctrl_info);
3570                 if (status & PQI_SOFT_RESET_INITIATE)
3571                         return RESET_INITIATE_DRIVER;
3572
3573                 if (status & PQI_SOFT_RESET_ABORT)
3574                         return RESET_ABORT;
3575
3576                 if (!sis_is_firmware_running(ctrl_info))
3577                         return RESET_NORESPONSE;
3578
3579                 if (time_after(jiffies, timeout)) {
3580                         dev_warn(&ctrl_info->pci_dev->dev,
3581                                 "timed out waiting for soft reset status\n");
3582                         return RESET_TIMEDOUT;
3583                 }
3584
3585                 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3586         }
3587 }
3588
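/*
 * Complete the reset phase of Online Firmware Activation (OFA). Depending
 * on the soft reset status reported by the controller, the driver either
 * initiates the reset itself, lets the firmware do it, aborts OFA, or
 * takes the controller offline when there is no response.
 */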
3589 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3590 {
3591         int rc;
3592         unsigned int delay_secs;
3593         enum pqi_soft_reset_status reset_status;
3594
3595         if (ctrl_info->soft_reset_handshake_supported)
3596                 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3597         else
3598                 reset_status = RESET_INITIATE_FIRMWARE;
3599
3600         delay_secs = PQI_POST_RESET_DELAY_SECS;
3601
3602         switch (reset_status) {
3603         case RESET_TIMEDOUT:
3604                 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3605                 fallthrough;
3606         case RESET_INITIATE_DRIVER:
3607                 dev_info(&ctrl_info->pci_dev->dev,
3608                                 "Online Firmware Activation: resetting controller\n");
3609                 sis_soft_reset(ctrl_info);
3610                 fallthrough;
3611         case RESET_INITIATE_FIRMWARE:
3612                 ctrl_info->pqi_mode_enabled = false;
3613                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3614                 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3615                 pqi_ofa_free_host_buffer(ctrl_info);
3616                 pqi_ctrl_ofa_done(ctrl_info);
3617                 dev_info(&ctrl_info->pci_dev->dev,
3618                                 "Online Firmware Activation: %s\n",
3619                                 rc == 0 ? "SUCCESS" : "FAILED");
3620                 break;
3621         case RESET_ABORT:
3622                 dev_info(&ctrl_info->pci_dev->dev,
3623                                 "Online Firmware Activation ABORTED\n");
3624                 if (ctrl_info->soft_reset_handshake_supported)
3625                         pqi_clear_soft_reset_status(ctrl_info);
3626                 pqi_ofa_free_host_buffer(ctrl_info);
3627                 pqi_ctrl_ofa_done(ctrl_info);
3628                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3629                 break;
3630         case RESET_NORESPONSE:
3631                 fallthrough;
3632         default:
3633                 dev_err(&ctrl_info->pci_dev->dev,
3634                         "unexpected Online Firmware Activation reset status: 0x%x\n",
3635                         reset_status);
3636                 pqi_ofa_free_host_buffer(ctrl_info);
3637                 pqi_ctrl_ofa_done(ctrl_info);
3638                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3639                 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3640                 break;
3641         }
3642 }
3643
3644 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3645 {
3646         struct pqi_ctrl_info *ctrl_info;
3647
3648         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3649
3650         pqi_ctrl_ofa_start(ctrl_info);
3651         pqi_ofa_setup_host_buffer(ctrl_info);
3652         pqi_ofa_host_memory_update(ctrl_info);
3653 }
3654
3655 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3656 {
3657         struct pqi_ctrl_info *ctrl_info;
3658         struct pqi_event *event;
3659
3660         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3661
3662         event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3663
3664         pqi_ofa_ctrl_quiesce(ctrl_info);
3665         pqi_acknowledge_event(ctrl_info, event);
3666         pqi_process_soft_reset(ctrl_info);
3667 }
3668
3669 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3670         struct pqi_event *event)
3671 {
3672         bool ack_event;
3673
3674         ack_event = true;
3675
3676         switch (event->event_id) {
3677         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3678                 dev_info(&ctrl_info->pci_dev->dev,
3679                         "received Online Firmware Activation memory allocation request\n");
3680                 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3681                 break;
3682         case PQI_EVENT_OFA_QUIESCE:
3683                 dev_info(&ctrl_info->pci_dev->dev,
3684                         "received Online Firmware Activation quiesce request\n");
3685                 schedule_work(&ctrl_info->ofa_quiesce_work);
3686                 ack_event = false;
3687                 break;
3688         case PQI_EVENT_OFA_CANCELED:
3689                 dev_info(&ctrl_info->pci_dev->dev,
3690                         "received Online Firmware Activation cancel request: reason: %u\n",
3691                         ctrl_info->ofa_cancel_reason);
3692                 pqi_ofa_free_host_buffer(ctrl_info);
3693                 pqi_ctrl_ofa_done(ctrl_info);
3694                 break;
3695         default:
3696                 dev_err(&ctrl_info->pci_dev->dev,
3697                         "received unknown Online Firmware Activation request: event ID: %u\n",
3698                         event->event_id);
3699                 break;
3700         }
3701
3702         return ack_event;
3703 }
3704
3705 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3706 {
3707         unsigned long flags;
3708         struct pqi_scsi_dev *device;
3709
3710         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3711
3712         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3713                 if (device->raid_bypass_enabled)
3714                         device->raid_bypass_enabled = false;
3715
3716         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3717 }
3718
3719 static void pqi_event_worker(struct work_struct *work)
3720 {
3721         unsigned int i;
3722         bool rescan_needed;
3723         struct pqi_ctrl_info *ctrl_info;
3724         struct pqi_event *event;
3725         bool ack_event;
3726
3727         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3728
3729         pqi_ctrl_busy(ctrl_info);
3730         pqi_wait_if_ctrl_blocked(ctrl_info);
3731         if (pqi_ctrl_offline(ctrl_info))
3732                 goto out;
3733
3734         rescan_needed = false;
3735         event = ctrl_info->events;
3736         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3737                 if (event->pending) {
3738                         event->pending = false;
3739                         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3740                                 ack_event = pqi_ofa_process_event(ctrl_info, event);
3741                         } else {
3742                                 ack_event = true;
3743                                 rescan_needed = true;
3744                                 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3745                                         ctrl_info->logical_volume_rescan_needed = true;
3746                                 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3747                                         pqi_disable_raid_bypass(ctrl_info);
3748                         }
3749                         if (ack_event)
3750                                 pqi_acknowledge_event(ctrl_info, event);
3751                 }
3752                 event++;
3753         }
3754
3755 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY         (5 * HZ)
3756
3757         if (rescan_needed)
3758                 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3759                         PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3760
3761 out:
3762         pqi_ctrl_unbusy(ctrl_info);
3763 }
3764
3765 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * HZ)
3766
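/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL. If neither the driver's
 * interrupt count nor the controller's heartbeat counter has advanced
 * since the previous run, the controller is assumed hung and is taken
 * offline.
 */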
3767 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3768 {
3769         int num_interrupts;
3770         u32 heartbeat_count;
3771         struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3772
3773         pqi_check_ctrl_health(ctrl_info);
3774         if (pqi_ctrl_offline(ctrl_info))
3775                 return;
3776
3777         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3778         heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3779
3780         if (num_interrupts == ctrl_info->previous_num_interrupts) {
3781                 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3782                         dev_err(&ctrl_info->pci_dev->dev,
3783                                 "no heartbeat detected - last heartbeat count: %u\n",
3784                                 heartbeat_count);
3785                         pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3786                         return;
3787                 }
3788         } else {
3789                 ctrl_info->previous_num_interrupts = num_interrupts;
3790         }
3791
3792         ctrl_info->previous_heartbeat_count = heartbeat_count;
3793         mod_timer(&ctrl_info->heartbeat_timer,
3794                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3795 }
3796
3797 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3798 {
3799         if (!ctrl_info->heartbeat_counter)
3800                 return;
3801
3802         ctrl_info->previous_num_interrupts =
3803                 atomic_read(&ctrl_info->num_interrupts);
3804         ctrl_info->previous_heartbeat_count =
3805                 pqi_read_heartbeat_counter(ctrl_info);
3806
3807         ctrl_info->heartbeat_timer.expires =
3808                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3809         add_timer(&ctrl_info->heartbeat_timer);
3810 }
3811
3812 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3813 {
3814         del_timer_sync(&ctrl_info->heartbeat_timer);
3815 }
3816
3817 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3818         struct pqi_event *event, struct pqi_event_response *response)
3819 {
3820         switch (event->event_id) {
3821         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3822                 ctrl_info->ofa_bytes_requested =
3823                         get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3824                 break;
3825         case PQI_EVENT_OFA_CANCELED:
3826                 ctrl_info->ofa_cancel_reason =
3827                         get_unaligned_le16(&response->data.ofa_cancelled.reason);
3828                 break;
3829         }
3830 }
3831
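/*
 * Drain the event queue: record each supported event that requests
 * acknowledgement in the per-type event table, capture OFA-specific
 * payloads, then schedule the event worker to process (and acknowledge)
 * the events outside interrupt context.
 */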
3832 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3833 {
3834         int num_events;
3835         pqi_index_t oq_pi;
3836         pqi_index_t oq_ci;
3837         struct pqi_event_queue *event_queue;
3838         struct pqi_event_response *response;
3839         struct pqi_event *event;
3840         int event_index;
3841
3842         event_queue = &ctrl_info->event_queue;
3843         num_events = 0;
3844         oq_ci = event_queue->oq_ci_copy;
3845
3846         while (1) {
3847                 oq_pi = readl(event_queue->oq_pi);
3848                 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3849                         pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3850                         dev_err(&ctrl_info->pci_dev->dev,
3851                                 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3852                                 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3853                         return -1;
3854                 }
3855
3856                 if (oq_pi == oq_ci)
3857                         break;
3858
3859                 num_events++;
3860                 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3861
3862                 event_index = pqi_event_type_to_event_index(response->event_type);
3863
3864                 if (event_index >= 0 && response->request_acknowledge) {
3865                         event = &ctrl_info->events[event_index];
3866                         event->pending = true;
3867                         event->event_type = response->event_type;
3868                         event->event_id = get_unaligned_le16(&response->event_id);
3869                         event->additional_event_id =
3870                                 get_unaligned_le32(&response->additional_event_id);
3871                         if (event->event_type == PQI_EVENT_TYPE_OFA)
3872                                 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3873                 }
3874
3875                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3876         }
3877
3878         if (num_events) {
3879                 event_queue->oq_ci_copy = oq_ci;
3880                 writel(oq_ci, event_queue->oq_ci);
3881                 schedule_work(&ctrl_info->event_work);
3882         }
3883
3884         return num_events;
3885 }
3886
3887 #define PQI_LEGACY_INTX_MASK    0x1
3888
3889 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3890 {
3891         u32 intx_mask;
3892         struct pqi_device_registers __iomem *pqi_registers;
3893         volatile void __iomem *register_addr;
3894
3895         pqi_registers = ctrl_info->pqi_registers;
3896
3897         if (enable_intx)
3898                 register_addr = &pqi_registers->legacy_intx_mask_clear;
3899         else
3900                 register_addr = &pqi_registers->legacy_intx_mask_set;
3901
3902         intx_mask = readl(register_addr);
3903         intx_mask |= PQI_LEGACY_INTX_MASK;
3904         writel(intx_mask, register_addr);
3905 }
3906
3907 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3908         enum pqi_irq_mode new_mode)
3909 {
3910         switch (ctrl_info->irq_mode) {
3911         case IRQ_MODE_MSIX:
3912                 switch (new_mode) {
3913                 case IRQ_MODE_MSIX:
3914                         break;
3915                 case IRQ_MODE_INTX:
3916                         pqi_configure_legacy_intx(ctrl_info, true);
3917                         sis_enable_intx(ctrl_info);
3918                         break;
3919                 case IRQ_MODE_NONE:
3920                         break;
3921                 }
3922                 break;
3923         case IRQ_MODE_INTX:
3924                 switch (new_mode) {
3925                 case IRQ_MODE_MSIX:
3926                         pqi_configure_legacy_intx(ctrl_info, false);
3927                         sis_enable_msix(ctrl_info);
3928                         break;
3929                 case IRQ_MODE_INTX:
3930                         break;
3931                 case IRQ_MODE_NONE:
3932                         pqi_configure_legacy_intx(ctrl_info, false);
3933                         break;
3934                 }
3935                 break;
3936         case IRQ_MODE_NONE:
3937                 switch (new_mode) {
3938                 case IRQ_MODE_MSIX:
3939                         sis_enable_msix(ctrl_info);
3940                         break;
3941                 case IRQ_MODE_INTX:
3942                         pqi_configure_legacy_intx(ctrl_info, true);
3943                         sis_enable_intx(ctrl_info);
3944                         break;
3945                 case IRQ_MODE_NONE:
3946                         break;
3947                 }
3948                 break;
3949         }
3950
3951         ctrl_info->irq_mode = new_mode;
3952 }
3953
3954 #define PQI_LEGACY_INTX_PENDING         0x1
3955
3956 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3957 {
3958         bool valid_irq;
3959         u32 intx_status;
3960
3961         switch (ctrl_info->irq_mode) {
3962         case IRQ_MODE_MSIX:
3963                 valid_irq = true;
3964                 break;
3965         case IRQ_MODE_INTX:
3966                 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
3967                 if (intx_status & PQI_LEGACY_INTX_PENDING)
3968                         valid_irq = true;
3969                 else
3970                         valid_irq = false;
3971                 break;
3972         case IRQ_MODE_NONE:
3973         default:
3974                 valid_irq = false;
3975                 break;
3976         }
3977
3978         return valid_irq;
3979 }
3980
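/*
 * Per-queue-group interrupt handler. Every vector drains its own I/O
 * response queue; the vector registered as event_irq (vector 0) also
 * drains the event queue. Any queued requests are then started on both
 * the RAID and AIO paths.
 */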
3981 static irqreturn_t pqi_irq_handler(int irq, void *data)
3982 {
3983         struct pqi_ctrl_info *ctrl_info;
3984         struct pqi_queue_group *queue_group;
3985         int num_io_responses_handled;
3986         int num_events_handled;
3987
3988         queue_group = data;
3989         ctrl_info = queue_group->ctrl_info;
3990
3991         if (!pqi_is_valid_irq(ctrl_info))
3992                 return IRQ_NONE;
3993
3994         num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3995         if (num_io_responses_handled < 0)
3996                 goto out;
3997
3998         if (irq == ctrl_info->event_irq) {
3999                 num_events_handled = pqi_process_event_intr(ctrl_info);
4000                 if (num_events_handled < 0)
4001                         goto out;
4002         } else {
4003                 num_events_handled = 0;
4004         }
4005
4006         if (num_io_responses_handled + num_events_handled > 0)
4007                 atomic_inc(&ctrl_info->num_interrupts);
4008
4009         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4010         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4011
4012 out:
4013         return IRQ_HANDLED;
4014 }
4015
4016 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4017 {
4018         struct pci_dev *pci_dev = ctrl_info->pci_dev;
4019         int i;
4020         int rc;
4021
4022         ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4023
4024         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4025                 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4026                         DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4027                 if (rc) {
4028                         dev_err(&pci_dev->dev,
4029                                 "irq %u init failed with error %d\n",
4030                                 pci_irq_vector(pci_dev, i), rc);
4031                         return rc;
4032                 }
4033                 ctrl_info->num_msix_vectors_initialized++;
4034         }
4035
4036         return 0;
4037 }
4038
4039 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4040 {
4041         int i;
4042
4043         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4044                 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4045                         &ctrl_info->queue_groups[i]);
4046
4047         ctrl_info->num_msix_vectors_initialized = 0;
4048 }
4049
4050 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4051 {
4052         int num_vectors_enabled;
4053         unsigned int flags = PCI_IRQ_MSIX;
4054
4055         if (!pqi_disable_managed_interrupts)
4056                 flags |= PCI_IRQ_AFFINITY;
4057
4058         num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4059                         PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4060                         flags);
4061         if (num_vectors_enabled < 0) {
4062                 dev_err(&ctrl_info->pci_dev->dev,
4063                         "MSI-X init failed with error %d\n",
4064                         num_vectors_enabled);
4065                 return num_vectors_enabled;
4066         }
4067
4068         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4069         ctrl_info->irq_mode = IRQ_MODE_MSIX;
4070         return 0;
4071 }
4072
4073 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4074 {
4075         if (ctrl_info->num_msix_vectors_enabled) {
4076                 pci_free_irq_vectors(ctrl_info->pci_dev);
4077                 ctrl_info->num_msix_vectors_enabled = 0;
4078         }
4079 }
4080
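/*
 * Size and allocate one coherent DMA region that holds every inbound and
 * outbound element array, the event queue elements, and all queue index
 * words, then carve it up with the required alignment for each piece.
 */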
4081 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4082 {
4083         unsigned int i;
4084         size_t alloc_length;
4085         size_t element_array_length_per_iq;
4086         size_t element_array_length_per_oq;
4087         void *element_array;
4088         void __iomem *next_queue_index;
4089         void *aligned_pointer;
4090         unsigned int num_inbound_queues;
4091         unsigned int num_outbound_queues;
4092         unsigned int num_queue_indexes;
4093         struct pqi_queue_group *queue_group;
4094
4095         element_array_length_per_iq =
4096                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4097                 ctrl_info->num_elements_per_iq;
4098         element_array_length_per_oq =
4099                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4100                 ctrl_info->num_elements_per_oq;
4101         num_inbound_queues = ctrl_info->num_queue_groups * 2;
4102         num_outbound_queues = ctrl_info->num_queue_groups;
4103         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4104
4105         aligned_pointer = NULL;
4106
4107         for (i = 0; i < num_inbound_queues; i++) {
4108                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4109                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4110                 aligned_pointer += element_array_length_per_iq;
4111         }
4112
4113         for (i = 0; i < num_outbound_queues; i++) {
4114                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4115                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4116                 aligned_pointer += element_array_length_per_oq;
4117         }
4118
4119         aligned_pointer = PTR_ALIGN(aligned_pointer,
4120                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4121         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4122                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4123
4124         for (i = 0; i < num_queue_indexes; i++) {
4125                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4126                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4127                 aligned_pointer += sizeof(pqi_index_t);
4128         }
4129
4130         alloc_length = (size_t)aligned_pointer +
4131                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4132
4133         alloc_length += PQI_EXTRA_SGL_MEMORY;
4134
4135         ctrl_info->queue_memory_base =
4136                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4137                                    &ctrl_info->queue_memory_base_dma_handle,
4138                                    GFP_KERNEL);
4139
4140         if (!ctrl_info->queue_memory_base)
4141                 return -ENOMEM;
4142
4143         ctrl_info->queue_memory_length = alloc_length;
4144
4145         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4146                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4147
4148         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4149                 queue_group = &ctrl_info->queue_groups[i];
4150                 queue_group->iq_element_array[RAID_PATH] = element_array;
4151                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4152                         ctrl_info->queue_memory_base_dma_handle +
4153                                 (element_array - ctrl_info->queue_memory_base);
4154                 element_array += element_array_length_per_iq;
4155                 element_array = PTR_ALIGN(element_array,
4156                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4157                 queue_group->iq_element_array[AIO_PATH] = element_array;
4158                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4159                         ctrl_info->queue_memory_base_dma_handle +
4160                         (element_array - ctrl_info->queue_memory_base);
4161                 element_array += element_array_length_per_iq;
4162                 element_array = PTR_ALIGN(element_array,
4163                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4164         }
4165
4166         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4167                 queue_group = &ctrl_info->queue_groups[i];
4168                 queue_group->oq_element_array = element_array;
4169                 queue_group->oq_element_array_bus_addr =
4170                         ctrl_info->queue_memory_base_dma_handle +
4171                         (element_array - ctrl_info->queue_memory_base);
4172                 element_array += element_array_length_per_oq;
4173                 element_array = PTR_ALIGN(element_array,
4174                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4175         }
4176
4177         ctrl_info->event_queue.oq_element_array = element_array;
4178         ctrl_info->event_queue.oq_element_array_bus_addr =
4179                 ctrl_info->queue_memory_base_dma_handle +
4180                 (element_array - ctrl_info->queue_memory_base);
4181         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4182                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4183
4184         next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4185                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4186
4187         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4188                 queue_group = &ctrl_info->queue_groups[i];
4189                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4190                 queue_group->iq_ci_bus_addr[RAID_PATH] =
4191                         ctrl_info->queue_memory_base_dma_handle +
4192                         (next_queue_index -
4193                         (void __iomem *)ctrl_info->queue_memory_base);
4194                 next_queue_index += sizeof(pqi_index_t);
4195                 next_queue_index = PTR_ALIGN(next_queue_index,
4196                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4197                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4198                 queue_group->iq_ci_bus_addr[AIO_PATH] =
4199                         ctrl_info->queue_memory_base_dma_handle +
4200                         (next_queue_index -
4201                         (void __iomem *)ctrl_info->queue_memory_base);
4202                 next_queue_index += sizeof(pqi_index_t);
4203                 next_queue_index = PTR_ALIGN(next_queue_index,
4204                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4205                 queue_group->oq_pi = next_queue_index;
4206                 queue_group->oq_pi_bus_addr =
4207                         ctrl_info->queue_memory_base_dma_handle +
4208                         (next_queue_index -
4209                         (void __iomem *)ctrl_info->queue_memory_base);
4210                 next_queue_index += sizeof(pqi_index_t);
4211                 next_queue_index = PTR_ALIGN(next_queue_index,
4212                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4213         }
4214
4215         ctrl_info->event_queue.oq_pi = next_queue_index;
4216         ctrl_info->event_queue.oq_pi_bus_addr =
4217                 ctrl_info->queue_memory_base_dma_handle +
4218                 (next_queue_index -
4219                 (void __iomem *)ctrl_info->queue_memory_base);
4220
4221         return 0;
4222 }
4223
4224 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4225 {
4226         unsigned int i;
4227         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4228         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4229
4230         /*
4231          * Initialize the backpointers to the controller structure in
4232          * each operational queue group structure.
4233          */
4234         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4235                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4236
4237         /*
4238          * Assign IDs to all operational queues.  Note that the IDs
4239          * assigned to operational IQs are independent of the IDs
4240          * assigned to operational OQs.
4241          */
4242         ctrl_info->event_queue.oq_id = next_oq_id++;
4243         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4244                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4245                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4246                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4247         }
4248
4249         /*
4250          * Assign MSI-X table entry indexes to all queues.  Note that the
4251          * interrupt for the event queue is shared with the first queue group.
4252          */
4253         ctrl_info->event_queue.int_msg_num = 0;
4254         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4255                 ctrl_info->queue_groups[i].int_msg_num = i;
4256
4257         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4258                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4259                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4260                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4261                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4262         }
4263 }
4264
4265 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4266 {
4267         size_t alloc_length;
4268         struct pqi_admin_queues_aligned *admin_queues_aligned;
4269         struct pqi_admin_queues *admin_queues;
4270
4271         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4272                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4273
4274         ctrl_info->admin_queue_memory_base =
4275                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4276                                    &ctrl_info->admin_queue_memory_base_dma_handle,
4277                                    GFP_KERNEL);
4278
4279         if (!ctrl_info->admin_queue_memory_base)
4280                 return -ENOMEM;
4281
4282         ctrl_info->admin_queue_memory_length = alloc_length;
4283
4284         admin_queues = &ctrl_info->admin_queues;
4285         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4286                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4287         admin_queues->iq_element_array =
4288                 &admin_queues_aligned->iq_element_array;
4289         admin_queues->oq_element_array =
4290                 &admin_queues_aligned->oq_element_array;
4291         admin_queues->iq_ci =
4292                 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4293         admin_queues->oq_pi =
4294                 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4295
4296         admin_queues->iq_element_array_bus_addr =
4297                 ctrl_info->admin_queue_memory_base_dma_handle +
4298                 (admin_queues->iq_element_array -
4299                 ctrl_info->admin_queue_memory_base);
4300         admin_queues->oq_element_array_bus_addr =
4301                 ctrl_info->admin_queue_memory_base_dma_handle +
4302                 (admin_queues->oq_element_array -
4303                 ctrl_info->admin_queue_memory_base);
4304         admin_queues->iq_ci_bus_addr =
4305                 ctrl_info->admin_queue_memory_base_dma_handle +
4306                 ((void __iomem *)admin_queues->iq_ci -
4307                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4308         admin_queues->oq_pi_bus_addr =
4309                 ctrl_info->admin_queue_memory_base_dma_handle +
4310                 ((void __iomem *)admin_queues->oq_pi -
4311                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4312
4313         return 0;
4314 }
4315
4316 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
4317 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
4318
4319 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4320 {
4321         struct pqi_device_registers __iomem *pqi_registers;
4322         struct pqi_admin_queues *admin_queues;
4323         unsigned long timeout;
4324         u8 status;
4325         u32 reg;
4326
4327         pqi_registers = ctrl_info->pqi_registers;
4328         admin_queues = &ctrl_info->admin_queues;
4329
4330         writeq((u64)admin_queues->iq_element_array_bus_addr,
4331                 &pqi_registers->admin_iq_element_array_addr);
4332         writeq((u64)admin_queues->oq_element_array_bus_addr,
4333                 &pqi_registers->admin_oq_element_array_addr);
4334         writeq((u64)admin_queues->iq_ci_bus_addr,
4335                 &pqi_registers->admin_iq_ci_addr);
4336         writeq((u64)admin_queues->oq_pi_bus_addr,
4337                 &pqi_registers->admin_oq_pi_addr);
4338
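        /*
         * Pack the admin IQ element count into bits 7:0, the admin OQ
         * element count into bits 15:8, and the MSI-X message number used
         * by the admin queues starting at bit 16, then write the whole
         * word to the admin_iq_num_elements register.
         */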
4339         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4340                 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4341                 (admin_queues->int_msg_num << 16);
4342         writel(reg, &pqi_registers->admin_iq_num_elements);
4343
4344         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4345                 &pqi_registers->function_and_status_code);
4346
4347         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4348         while (1) {
4349                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4350                 status = readb(&pqi_registers->function_and_status_code);
4351                 if (status == PQI_STATUS_IDLE)
4352                         break;
4353                 if (time_after(jiffies, timeout))
4354                         return -ETIMEDOUT;
4355         }
4356
4357         /*
4358          * The offset registers are not initialized to the correct
4359          * offsets until *after* the create admin queue pair command
4360          * completes successfully.
4361          */
4362         admin_queues->iq_pi = ctrl_info->iomem_base +
4363                 PQI_DEVICE_REGISTERS_OFFSET +
4364                 readq(&pqi_registers->admin_iq_pi_offset);
4365         admin_queues->oq_ci = ctrl_info->iomem_base +
4366                 PQI_DEVICE_REGISTERS_OFFSET +
4367                 readq(&pqi_registers->admin_oq_ci_offset);
4368
4369         return 0;
4370 }
4371
4372 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4373         struct pqi_general_admin_request *request)
4374 {
4375         struct pqi_admin_queues *admin_queues;
4376         void *next_element;
4377         pqi_index_t iq_pi;
4378
4379         admin_queues = &ctrl_info->admin_queues;
4380         iq_pi = admin_queues->iq_pi_copy;
4381
4382         next_element = admin_queues->iq_element_array +
4383                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4384
4385         memcpy(next_element, request, sizeof(*request));
4386
4387         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4388         admin_queues->iq_pi_copy = iq_pi;
4389
4390         /*
4391          * This write notifies the controller that an IU is available to be
4392          * processed.
4393          */
4394         writel(iq_pi, admin_queues->iq_pi);
4395 }
4396
4397 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
4398
4399 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4400         struct pqi_general_admin_response *response)
4401 {
4402         struct pqi_admin_queues *admin_queues;
4403         pqi_index_t oq_pi;
4404         pqi_index_t oq_ci;
4405         unsigned long timeout;
4406
4407         admin_queues = &ctrl_info->admin_queues;
4408         oq_ci = admin_queues->oq_ci_copy;
4409
4410         timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4411
4412         while (1) {
4413                 oq_pi = readl(admin_queues->oq_pi);
4414                 if (oq_pi != oq_ci)
4415                         break;
4416                 if (time_after(jiffies, timeout)) {
4417                         dev_err(&ctrl_info->pci_dev->dev,
4418                                 "timed out waiting for admin response\n");
4419                         return -ETIMEDOUT;
4420                 }
4421                 if (!sis_is_firmware_running(ctrl_info))
4422                         return -ENXIO;
4423                 usleep_range(1000, 2000);
4424         }
4425
4426         memcpy(response, admin_queues->oq_element_array +
4427                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4428
4429         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4430         admin_queues->oq_ci_copy = oq_ci;
4431         writel(oq_ci, admin_queues->oq_ci);
4432
4433         return 0;
4434 }
4435
4436 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4437         struct pqi_queue_group *queue_group, enum pqi_io_path path,
4438         struct pqi_io_request *io_request)
4439 {
4440         struct pqi_io_request *next;
4441         void *next_element;
4442         pqi_index_t iq_pi;
4443         pqi_index_t iq_ci;
4444         size_t iu_length;
4445         unsigned long flags;
4446         unsigned int num_elements_needed;
4447         unsigned int num_elements_to_end_of_queue;
4448         size_t copy_count;
4449         struct pqi_iu_header *request;
4450
4451         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4452
4453         if (io_request) {
4454                 io_request->queue_group = queue_group;
4455                 list_add_tail(&io_request->request_list_entry,
4456                         &queue_group->request_list[path]);
4457         }
4458
4459         iq_pi = queue_group->iq_pi_copy[path];
4460
4461         list_for_each_entry_safe(io_request, next,
4462                 &queue_group->request_list[path], request_list_entry) {
4463
4464                 request = io_request->iu;
4465
4466                 iu_length = get_unaligned_le16(&request->iu_length) +
4467                         PQI_REQUEST_HEADER_LENGTH;
4468                 num_elements_needed =
4469                         DIV_ROUND_UP(iu_length,
4470                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4471
4472                 iq_ci = readl(queue_group->iq_ci[path]);
4473
4474                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4475                         ctrl_info->num_elements_per_iq))
4476                         break;
4477
4478                 put_unaligned_le16(queue_group->oq_id,
4479                         &request->response_queue_id);
4480
4481                 next_element = queue_group->iq_element_array[path] +
4482                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4483
4484                 num_elements_to_end_of_queue =
4485                         ctrl_info->num_elements_per_iq - iq_pi;
4486
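                /*
                 * Copy the IU into the element array, splitting the copy
                 * into two pieces if it would wrap past the end of the
                 * circular queue.
                 */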
4487                 if (num_elements_needed <= num_elements_to_end_of_queue) {
4488                         memcpy(next_element, request, iu_length);
4489                 } else {
4490                         copy_count = num_elements_to_end_of_queue *
4491                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4492                         memcpy(next_element, request, copy_count);
4493                         memcpy(queue_group->iq_element_array[path],
4494                                 (u8 *)request + copy_count,
4495                                 iu_length - copy_count);
4496                 }
4497
4498                 iq_pi = (iq_pi + num_elements_needed) %
4499                         ctrl_info->num_elements_per_iq;
4500
4501                 list_del(&io_request->request_list_entry);
4502         }
4503
4504         if (iq_pi != queue_group->iq_pi_copy[path]) {
4505                 queue_group->iq_pi_copy[path] = iq_pi;
4506                 /*
4507                  * This write notifies the controller that one or more IUs are
4508                  * available to be processed.
4509                  */
4510                 writel(iq_pi, queue_group->iq_pi[path]);
4511         }
4512
4513         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4514 }
4515
4516 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS         10
4517
4518 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4519         struct completion *wait)
4520 {
4521         int rc;
4522
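        /*
         * There is no overall deadline here: the timeout only bounds how
         * often the controller's health is re-checked, and the wait is
         * abandoned only if the controller has gone offline.
         */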
4523         while (1) {
4524                 if (wait_for_completion_io_timeout(wait,
4525                         PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4526                         rc = 0;
4527                         break;
4528                 }
4529
4530                 pqi_check_ctrl_health(ctrl_info);
4531                 if (pqi_ctrl_offline(ctrl_info)) {
4532                         rc = -ENXIO;
4533                         break;
4534                 }
4535         }
4536
4537         return rc;
4538 }
4539
4540 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4541         void *context)
4542 {
4543         struct completion *waiting = context;
4544
4545         complete(waiting);
4546 }
4547
4548 static int pqi_process_raid_io_error_synchronous(
4549         struct pqi_raid_error_info *error_info)
4550 {
4551         int rc = -EIO;
4552
4553         switch (error_info->data_out_result) {
4554         case PQI_DATA_IN_OUT_GOOD:
4555                 if (error_info->status == SAM_STAT_GOOD)
4556                         rc = 0;
4557                 break;
4558         case PQI_DATA_IN_OUT_UNDERFLOW:
4559                 if (error_info->status == SAM_STAT_GOOD ||
4560                         error_info->status == SAM_STAT_CHECK_CONDITION)
4561                         rc = 0;
4562                 break;
4563         case PQI_DATA_IN_OUT_ABORTED:
4564                 rc = PQI_CMD_STATUS_ABORTED;
4565                 break;
4566         }
4567
4568         return rc;
4569 }
4570
4571 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4572 {
4573         return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4574 }
4575
4576 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4577         struct pqi_iu_header *request, unsigned int flags,
4578         struct pqi_raid_error_info *error_info)
4579 {
4580         int rc = 0;
4581         struct pqi_io_request *io_request;
4582         size_t iu_length;
4583         DECLARE_COMPLETION_ONSTACK(wait);
4584
4585         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4586                 if (down_interruptible(&ctrl_info->sync_request_sem))
4587                         return -ERESTARTSYS;
4588         } else {
4589                 down(&ctrl_info->sync_request_sem);
4590         }
4591
4592         pqi_ctrl_busy(ctrl_info);
4593         /*
4594          * Wait for other admin queue updates such as:
4595          * config table changes, OFA memory updates, ...
4596          */
4597         if (pqi_is_blockable_request(request))
4598                 pqi_wait_if_ctrl_blocked(ctrl_info);
4599
4600         if (pqi_ctrl_offline(ctrl_info)) {
4601                 rc = -ENXIO;
4602                 goto out;
4603         }
4604
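        /*
         * Note: pqi_alloc_io_request() is called with a NULL scmd here, in
         * which case it draws from the reserved I/O slots and retries until
         * one frees up rather than returning NULL, so the result is not
         * checked.
         */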
4605         io_request = pqi_alloc_io_request(ctrl_info, NULL);
4606
4607         put_unaligned_le16(io_request->index,
4608                 &(((struct pqi_raid_path_request *)request)->request_id));
4609
4610         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4611                 ((struct pqi_raid_path_request *)request)->error_index =
4612                         ((struct pqi_raid_path_request *)request)->request_id;
4613
4614         iu_length = get_unaligned_le16(&request->iu_length) +
4615                 PQI_REQUEST_HEADER_LENGTH;
4616         memcpy(io_request->iu, request, iu_length);
4617
4618         io_request->io_complete_callback = pqi_raid_synchronous_complete;
4619         io_request->context = &wait;
4620
4621         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4622                 io_request);
4623
4624         pqi_wait_for_completion_io(ctrl_info, &wait);
4625
4626         if (error_info) {
4627                 if (io_request->error_info)
4628                         memcpy(error_info, io_request->error_info, sizeof(*error_info));
4629                 else
4630                         memset(error_info, 0, sizeof(*error_info));
4631         } else if (rc == 0 && io_request->error_info) {
4632                 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4633         }
4634
4635         pqi_free_io_request(io_request);
4636
4637 out:
4638         pqi_ctrl_unbusy(ctrl_info);
4639         up(&ctrl_info->sync_request_sem);
4640
4641         return rc;
4642 }
4643
4644 static int pqi_validate_admin_response(
4645         struct pqi_general_admin_response *response, u8 expected_function_code)
4646 {
4647         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4648                 return -EINVAL;
4649
4650         if (get_unaligned_le16(&response->header.iu_length) !=
4651                 PQI_GENERAL_ADMIN_IU_LENGTH)
4652                 return -EINVAL;
4653
4654         if (response->function_code != expected_function_code)
4655                 return -EINVAL;
4656
4657         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4658                 return -EINVAL;
4659
4660         return 0;
4661 }
4662
4663 static int pqi_submit_admin_request_synchronous(
4664         struct pqi_ctrl_info *ctrl_info,
4665         struct pqi_general_admin_request *request,
4666         struct pqi_general_admin_response *response)
4667 {
4668         int rc;
4669
4670         pqi_submit_admin_request(ctrl_info, request);
4671
4672         rc = pqi_poll_for_admin_response(ctrl_info, response);
4673
4674         if (rc == 0)
4675                 rc = pqi_validate_admin_response(response, request->function_code);
4676
4677         return rc;
4678 }
4679
4680 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4681 {
4682         int rc;
4683         struct pqi_general_admin_request request;
4684         struct pqi_general_admin_response response;
4685         struct pqi_device_capability *capability;
4686         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4687
4688         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4689         if (!capability)
4690                 return -ENOMEM;
4691
4692         memset(&request, 0, sizeof(request));
4693
4694         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4695         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4696                 &request.header.iu_length);
4697         request.function_code =
4698                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4699         put_unaligned_le32(sizeof(*capability),
4700                 &request.data.report_device_capability.buffer_length);
4701
4702         rc = pqi_map_single(ctrl_info->pci_dev,
4703                 &request.data.report_device_capability.sg_descriptor,
4704                 capability, sizeof(*capability),
4705                 DMA_FROM_DEVICE);
4706         if (rc)
4707                 goto out;
4708
4709         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4710
4711         pqi_pci_unmap(ctrl_info->pci_dev,
4712                 &request.data.report_device_capability.sg_descriptor, 1,
4713                 DMA_FROM_DEVICE);
4714
4715         if (rc)
4716                 goto out;
4717
4718         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4719                 rc = -EIO;
4720                 goto out;
4721         }
4722
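        /*
         * The firmware reports the queue element lengths in units of
         * 16 bytes (matching the element_length / 16 encoding used when
         * the operational queues are created), hence the multiplication
         * by 16 below.
         */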
4723         ctrl_info->max_inbound_queues =
4724                 get_unaligned_le16(&capability->max_inbound_queues);
4725         ctrl_info->max_elements_per_iq =
4726                 get_unaligned_le16(&capability->max_elements_per_iq);
4727         ctrl_info->max_iq_element_length =
4728                 get_unaligned_le16(&capability->max_iq_element_length)
4729                 * 16;
4730         ctrl_info->max_outbound_queues =
4731                 get_unaligned_le16(&capability->max_outbound_queues);
4732         ctrl_info->max_elements_per_oq =
4733                 get_unaligned_le16(&capability->max_elements_per_oq);
4734         ctrl_info->max_oq_element_length =
4735                 get_unaligned_le16(&capability->max_oq_element_length)
4736                 * 16;
4737
4738         sop_iu_layer_descriptor =
4739                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4740
4741         ctrl_info->max_inbound_iu_length_per_firmware =
4742                 get_unaligned_le16(
4743                         &sop_iu_layer_descriptor->max_inbound_iu_length);
4744         ctrl_info->inbound_spanning_supported =
4745                 sop_iu_layer_descriptor->inbound_spanning_supported;
4746         ctrl_info->outbound_spanning_supported =
4747                 sop_iu_layer_descriptor->outbound_spanning_supported;
4748
4749 out:
4750         kfree(capability);
4751
4752         return rc;
4753 }
4754
4755 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4756 {
4757         if (ctrl_info->max_iq_element_length <
4758                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4759                 dev_err(&ctrl_info->pci_dev->dev,
4760                         "max. inbound queue element length of %d is less than the required length of %d\n",
4761                         ctrl_info->max_iq_element_length,
4762                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4763                 return -EINVAL;
4764         }
4765
4766         if (ctrl_info->max_oq_element_length <
4767                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4768                 dev_err(&ctrl_info->pci_dev->dev,
4769                         "max. outbound queue element length of %d is less than the required length of %d\n",
4770                         ctrl_info->max_oq_element_length,
4771                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4772                 return -EINVAL;
4773         }
4774
4775         if (ctrl_info->max_inbound_iu_length_per_firmware <
4776                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4777                 dev_err(&ctrl_info->pci_dev->dev,
4778                         "max. inbound IU length of %u is less than the min. required length of %d\n",
4779                         ctrl_info->max_inbound_iu_length_per_firmware,
4780                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4781                 return -EINVAL;
4782         }
4783
4784         if (!ctrl_info->inbound_spanning_supported) {
4785                 dev_err(&ctrl_info->pci_dev->dev,
4786                         "the controller does not support inbound spanning\n");
4787                 return -EINVAL;
4788         }
4789
4790         if (ctrl_info->outbound_spanning_supported) {
4791                 dev_err(&ctrl_info->pci_dev->dev,
4792                         "the controller supports outbound spanning but this driver does not\n");
4793                 return -EINVAL;
4794         }
4795
4796         return 0;
4797 }
4798
4799 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4800 {
4801         int rc;
4802         struct pqi_event_queue *event_queue;
4803         struct pqi_general_admin_request request;
4804         struct pqi_general_admin_response response;
4805
4806         event_queue = &ctrl_info->event_queue;
4807
4808         /*
4809          * Create the OQ (Outbound Queue - device to host queue) that is
4810          * dedicated to events.
4811          */
4812         memset(&request, 0, sizeof(request));
4813         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4814         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4815                 &request.header.iu_length);
4816         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4817         put_unaligned_le16(event_queue->oq_id,
4818                 &request.data.create_operational_oq.queue_id);
4819         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4820                 &request.data.create_operational_oq.element_array_addr);
4821         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4822                 &request.data.create_operational_oq.pi_addr);
4823         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4824                 &request.data.create_operational_oq.num_elements);
4825         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4826                 &request.data.create_operational_oq.element_length);
4827         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4828         put_unaligned_le16(event_queue->int_msg_num,
4829                 &request.data.create_operational_oq.int_msg_num);
4830
4831         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4832                 &response);
4833         if (rc)
4834                 return rc;
4835
4836         event_queue->oq_ci = ctrl_info->iomem_base +
4837                 PQI_DEVICE_REGISTERS_OFFSET +
4838                 get_unaligned_le64(
4839                         &response.data.create_operational_oq.oq_ci_offset);
4840
4841         return 0;
4842 }
4843
4844 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4845         unsigned int group_number)
4846 {
4847         int rc;
4848         struct pqi_queue_group *queue_group;
4849         struct pqi_general_admin_request request;
4850         struct pqi_general_admin_response response;
4851
4852         queue_group = &ctrl_info->queue_groups[group_number];
4853
4854         /*
4855          * Create IQ (Inbound Queue - host to device queue) for
4856          * RAID path.
4857          */
4858         memset(&request, 0, sizeof(request));
4859         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4860         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4861                 &request.header.iu_length);
4862         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4863         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4864                 &request.data.create_operational_iq.queue_id);
4865         put_unaligned_le64(
4866                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4867                 &request.data.create_operational_iq.element_array_addr);
4868         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4869                 &request.data.create_operational_iq.ci_addr);
4870         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4871                 &request.data.create_operational_iq.num_elements);
4872         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4873                 &request.data.create_operational_iq.element_length);
4874         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4875
4876         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4877                 &response);
4878         if (rc) {
4879                 dev_err(&ctrl_info->pci_dev->dev,
4880                         "error creating inbound RAID queue\n");
4881                 return rc;
4882         }
4883
4884         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4885                 PQI_DEVICE_REGISTERS_OFFSET +
4886                 get_unaligned_le64(
4887                         &response.data.create_operational_iq.iq_pi_offset);
4888
4889         /*
4890          * Create IQ (Inbound Queue - host to device queue) for
4891          * Advanced I/O (AIO) path.
4892          */
4893         memset(&request, 0, sizeof(request));
4894         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4895         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4896                 &request.header.iu_length);
4897         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4898         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4899                 &request.data.create_operational_iq.queue_id);
4900         put_unaligned_le64(
4901                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4902                 &request.data.create_operational_iq.element_array_addr);
4903         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4904                 &request.data.create_operational_iq.ci_addr);
4905         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4906                 &request.data.create_operational_iq.num_elements);
4907         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4908                 &request.data.create_operational_iq.element_length);
4909         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4910
4911         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4912                 &response);
4913         if (rc) {
4914                 dev_err(&ctrl_info->pci_dev->dev,
4915                         "error creating inbound AIO queue\n");
4916                 return rc;
4917         }
4918
4919         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4920                 PQI_DEVICE_REGISTERS_OFFSET +
4921                 get_unaligned_le64(
4922                         &response.data.create_operational_iq.iq_pi_offset);
4923
4924         /*
4925          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4926          * assumed to be for RAID path I/O unless we change the queue's
4927          * property.
4928          */
4929         memset(&request, 0, sizeof(request));
4930         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4931         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4932                 &request.header.iu_length);
4933         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4934         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4935                 &request.data.change_operational_iq_properties.queue_id);
4936         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4937                 &request.data.change_operational_iq_properties.vendor_specific);
4938
4939         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4940                 &response);
4941         if (rc) {
4942                 dev_err(&ctrl_info->pci_dev->dev,
4943                         "error changing queue property\n");
4944                 return rc;
4945         }
4946
4947         /*
4948          * Create OQ (Outbound Queue - device to host queue).
4949          */
4950         memset(&request, 0, sizeof(request));
4951         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4952         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4953                 &request.header.iu_length);
4954         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4955         put_unaligned_le16(queue_group->oq_id,
4956                 &request.data.create_operational_oq.queue_id);
4957         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4958                 &request.data.create_operational_oq.element_array_addr);
4959         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4960                 &request.data.create_operational_oq.pi_addr);
4961         put_unaligned_le16(ctrl_info->num_elements_per_oq,
4962                 &request.data.create_operational_oq.num_elements);
4963         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4964                 &request.data.create_operational_oq.element_length);
4965         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4966         put_unaligned_le16(queue_group->int_msg_num,
4967                 &request.data.create_operational_oq.int_msg_num);
4968
4969         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4970                 &response);
4971         if (rc) {
4972                 dev_err(&ctrl_info->pci_dev->dev,
4973                         "error creating outbound queue\n");
4974                 return rc;
4975         }
4976
4977         queue_group->oq_ci = ctrl_info->iomem_base +
4978                 PQI_DEVICE_REGISTERS_OFFSET +
4979                 get_unaligned_le64(
4980                         &response.data.create_operational_oq.oq_ci_offset);
4981
4982         return 0;
4983 }
4984
4985 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4986 {
4987         int rc;
4988         unsigned int i;
4989
4990         rc = pqi_create_event_queue(ctrl_info);
4991         if (rc) {
4992                 dev_err(&ctrl_info->pci_dev->dev,
4993                         "error creating event queue\n");
4994                 return rc;
4995         }
4996
4997         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4998                 rc = pqi_create_queue_group(ctrl_info, i);
4999                 if (rc) {
5000                         dev_err(&ctrl_info->pci_dev->dev,
5001                                 "error creating queue group number %u/%u\n",
5002                                 i, ctrl_info->num_queue_groups);
5003                         return rc;
5004                 }
5005         }
5006
5007         return 0;
5008 }
5009
5010 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
5011         struct_size_t(struct pqi_event_config,  descriptors, PQI_MAX_EVENT_DESCRIPTORS)
5012
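/*
 * Read the controller's current event configuration, point every event
 * type this driver supports at our event queue (or at OQ ID 0 to leave
 * it disabled), and then write the updated configuration back.
 */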
5013 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5014         bool enable_events)
5015 {
5016         int rc;
5017         unsigned int i;
5018         struct pqi_event_config *event_config;
5019         struct pqi_event_descriptor *event_descriptor;
5020         struct pqi_general_management_request request;
5021
5022         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5023                 GFP_KERNEL);
5024         if (!event_config)
5025                 return -ENOMEM;
5026
5027         memset(&request, 0, sizeof(request));
5028
5029         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5030         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5031                 data.report_event_configuration.sg_descriptors[1]) -
5032                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5033         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5034                 &request.data.report_event_configuration.buffer_length);
5035
5036         rc = pqi_map_single(ctrl_info->pci_dev,
5037                 request.data.report_event_configuration.sg_descriptors,
5038                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5039                 DMA_FROM_DEVICE);
5040         if (rc)
5041                 goto out;
5042
5043         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5044
5045         pqi_pci_unmap(ctrl_info->pci_dev,
5046                 request.data.report_event_configuration.sg_descriptors, 1,
5047                 DMA_FROM_DEVICE);
5048
5049         if (rc)
5050                 goto out;
5051
5052         for (i = 0; i < event_config->num_event_descriptors; i++) {
5053                 event_descriptor = &event_config->descriptors[i];
5054                 if (enable_events &&
5055                         pqi_is_supported_event(event_descriptor->event_type))
5056                                 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5057                                         &event_descriptor->oq_id);
5058                 else
5059                         put_unaligned_le16(0, &event_descriptor->oq_id);
5060         }
5061
5062         memset(&request, 0, sizeof(request));
5063
5064         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5065         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5066                 data.report_event_configuration.sg_descriptors[1]) -
5067                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5068         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5069                 &request.data.report_event_configuration.buffer_length);
5070
5071         rc = pqi_map_single(ctrl_info->pci_dev,
5072                 request.data.report_event_configuration.sg_descriptors,
5073                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5074                 DMA_TO_DEVICE);
5075         if (rc)
5076                 goto out;
5077
5078         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5079
5080         pqi_pci_unmap(ctrl_info->pci_dev,
5081                 request.data.report_event_configuration.sg_descriptors, 1,
5082                 DMA_TO_DEVICE);
5083
5084 out:
5085         kfree(event_config);
5086
5087         return rc;
5088 }
5089
5090 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5091 {
5092         return pqi_configure_events(ctrl_info, true);
5093 }
5094
5095 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5096 {
5097         unsigned int i;
5098         struct device *dev;
5099         size_t sg_chain_buffer_length;
5100         struct pqi_io_request *io_request;
5101
5102         if (!ctrl_info->io_request_pool)
5103                 return;
5104
5105         dev = &ctrl_info->pci_dev->dev;
5106         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5107         io_request = ctrl_info->io_request_pool;
5108
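        /*
         * The pool is allocated with kcalloc(), so the first slot with a
         * NULL sg_chain_buffer marks where a partially completed
         * allocation stopped; nothing beyond it needs to be freed.
         */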
5109         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5110                 kfree(io_request->iu);
5111                 if (!io_request->sg_chain_buffer)
5112                         break;
5113                 dma_free_coherent(dev, sg_chain_buffer_length,
5114                         io_request->sg_chain_buffer,
5115                         io_request->sg_chain_buffer_dma_handle);
5116                 io_request++;
5117         }
5118
5119         kfree(ctrl_info->io_request_pool);
5120         ctrl_info->io_request_pool = NULL;
5121 }
5122
5123 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5124 {
5125         ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5126                                      ctrl_info->error_buffer_length,
5127                                      &ctrl_info->error_buffer_dma_handle,
5128                                      GFP_KERNEL);
5129         if (!ctrl_info->error_buffer)
5130                 return -ENOMEM;
5131
5132         return 0;
5133 }
5134
5135 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5136 {
5137         unsigned int i;
5138         void *sg_chain_buffer;
5139         size_t sg_chain_buffer_length;
5140         dma_addr_t sg_chain_buffer_dma_handle;
5141         struct device *dev;
5142         struct pqi_io_request *io_request;
5143
5144         ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5145                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5146
5147         if (!ctrl_info->io_request_pool) {
5148                 dev_err(&ctrl_info->pci_dev->dev,
5149                         "failed to allocate I/O request pool\n");
5150                 goto error;
5151         }
5152
5153         dev = &ctrl_info->pci_dev->dev;
5154         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5155         io_request = ctrl_info->io_request_pool;
5156
5157         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5158                 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5159
5160                 if (!io_request->iu) {
5161                         dev_err(&ctrl_info->pci_dev->dev,
5162                                 "failed to allocate IU buffers\n");
5163                         goto error;
5164                 }
5165
5166                 sg_chain_buffer = dma_alloc_coherent(dev,
5167                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5168                         GFP_KERNEL);
5169
5170                 if (!sg_chain_buffer) {
5171                         dev_err(&ctrl_info->pci_dev->dev,
5172                                 "failed to allocate PQI scatter-gather chain buffers\n");
5173                         goto error;
5174                 }
5175
5176                 io_request->index = i;
5177                 io_request->sg_chain_buffer = sg_chain_buffer;
5178                 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5179                 io_request++;
5180         }
5181
5182         return 0;
5183
5184 error:
5185         pqi_free_all_io_requests(ctrl_info);
5186
5187         return -ENOMEM;
5188 }
5189
5190 /*
5191  * Calculate required resources that are sized based on max. outstanding
5192  * requests and max. transfer size.
5193  */
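/*
 * Illustrative example (assumed values, not from any particular
 * controller): with 4 KiB pages, an effective max. transfer size of 1 MiB,
 * and a controller SG limit of at least 257 entries, max_sg_entries starts
 * at 256, is bumped to 257 to cover a buffer that is not page-aligned, and
 * max_transfer_size is then rounded back down to (257 - 1) * 4 KiB = 1 MiB,
 * giving max_sectors = 2048.
 */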
5194
5195 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5196 {
5197         u32 max_transfer_size;
5198         u32 max_sg_entries;
5199
5200         ctrl_info->scsi_ml_can_queue =
5201                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5202         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5203
5204         ctrl_info->error_buffer_length =
5205                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5206
5207         if (reset_devices)
5208                 max_transfer_size = min(ctrl_info->max_transfer_size,
5209                         PQI_MAX_TRANSFER_SIZE_KDUMP);
5210         else
5211                 max_transfer_size = min(ctrl_info->max_transfer_size,
5212                         PQI_MAX_TRANSFER_SIZE);
5213
5214         max_sg_entries = max_transfer_size / PAGE_SIZE;
5215
5216         /* +1 to cover when the buffer is not page-aligned. */
5217         max_sg_entries++;
5218
5219         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5220
5221         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5222
5223         ctrl_info->sg_chain_buffer_length =
5224                 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5225                 PQI_EXTRA_SGL_MEMORY;
5226         ctrl_info->sg_tablesize = max_sg_entries;
5227         ctrl_info->max_sectors = max_transfer_size / 512;
5228 }
5229
5230 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5231 {
5232         int num_queue_groups;
5233         u16 num_elements_per_iq;
5234         u16 num_elements_per_oq;
5235
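        /*
         * reset_devices is normally only set when booting a kdump kernel,
         * so fall back to a single queue group to keep the crash kernel's
         * footprint small.
         */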
5236         if (reset_devices) {
5237                 num_queue_groups = 1;
5238         } else {
5239                 int num_cpus;
5240                 int max_queue_groups;
5241
5242                 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5243                         ctrl_info->max_outbound_queues - 1);
5244                 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5245
5246                 num_cpus = num_online_cpus();
5247                 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5248                 num_queue_groups = min(num_queue_groups, max_queue_groups);
5249         }
5250
5251         ctrl_info->num_queue_groups = num_queue_groups;
5252
5253         /*
5254          * Make sure that the max. inbound IU length is an even multiple
5255          * of our inbound element length.
5256          */
5257         ctrl_info->max_inbound_iu_length =
5258                 (ctrl_info->max_inbound_iu_length_per_firmware /
5259                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5260                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5261
5262         num_elements_per_iq =
5263                 (ctrl_info->max_inbound_iu_length /
5264                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5265
5266         /* Add one because one element in each queue is unusable. */
5267         num_elements_per_iq++;
5268
5269         num_elements_per_iq = min(num_elements_per_iq,
5270                 ctrl_info->max_elements_per_iq);
5271
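        /*
         * Each queue group's two inbound queues share one outbound queue,
         * so size the OQ to hold a response for every usable element in
         * both IQs, plus its own unusable element.
         */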
5272         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5273         num_elements_per_oq = min(num_elements_per_oq,
5274                 ctrl_info->max_elements_per_oq);
5275
5276         ctrl_info->num_elements_per_iq = num_elements_per_iq;
5277         ctrl_info->num_elements_per_oq = num_elements_per_oq;
5278
5279         ctrl_info->max_sg_per_iu =
5280                 ((ctrl_info->max_inbound_iu_length -
5281                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5282                 sizeof(struct pqi_sg_descriptor)) +
5283                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5284
5285         ctrl_info->max_sg_per_r56_iu =
5286                 ((ctrl_info->max_inbound_iu_length -
5287                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5288                 sizeof(struct pqi_sg_descriptor)) +
5289                 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5290 }
5291
5292 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5293         struct scatterlist *sg)
5294 {
5295         u64 address = (u64)sg_dma_address(sg);
5296         unsigned int length = sg_dma_len(sg);
5297
5298         put_unaligned_le64(address, &sg_descriptor->address);
5299         put_unaligned_le32(length, &sg_descriptor->length);
5300         put_unaligned_le32(0, &sg_descriptor->flags);
5301 }
5302
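/*
 * Fill in the SG descriptors embedded in an IU.  If the list does not fit
 * in max_sg_per_iu entries, the last embedded slot is turned into a
 * CISS_SG_CHAIN descriptor pointing at the request's pre-allocated
 * sg_chain_buffer and the remaining descriptors are written there.
 * Returns the number of descriptors placed in the IU itself (including
 * the chain descriptor, if any).
 */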
5303 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5304         struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5305         int max_sg_per_iu, bool *chained)
5306 {
5307         int i;
5308         unsigned int num_sg_in_iu;
5309
5310         *chained = false;
5311         i = 0;
5312         num_sg_in_iu = 0;
5313         max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
5314
5315         while (1) {
5316                 pqi_set_sg_descriptor(sg_descriptor, sg);
5317                 if (!*chained)
5318                         num_sg_in_iu++;
5319                 i++;
5320                 if (i == sg_count)
5321                         break;
5322                 sg_descriptor++;
5323                 if (i == max_sg_per_iu) {
5324                         put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5325                                 &sg_descriptor->address);
5326                         put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5327                                 &sg_descriptor->length);
5328                         put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5329                         *chained = true;
5330                         num_sg_in_iu++;
5331                         sg_descriptor = io_request->sg_chain_buffer;
5332                 }
5333                 sg = sg_next(sg);
5334         }
5335
5336         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5337
5338         return num_sg_in_iu;
5339 }
5340
5341 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5342         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5343         struct pqi_io_request *io_request)
5344 {
5345         u16 iu_length;
5346         int sg_count;
5347         bool chained;
5348         unsigned int num_sg_in_iu;
5349         struct scatterlist *sg;
5350         struct pqi_sg_descriptor *sg_descriptor;
5351
5352         sg_count = scsi_dma_map(scmd);
5353         if (sg_count < 0)
5354                 return sg_count;
5355
5356         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5357                 PQI_REQUEST_HEADER_LENGTH;
5358
5359         if (sg_count == 0)
5360                 goto out;
5361
5362         sg = scsi_sglist(scmd);
5363         sg_descriptor = request->sg_descriptors;
5364
5365         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5366                 ctrl_info->max_sg_per_iu, &chained);
5367
5368         request->partial = chained;
5369         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5370
5371 out:
5372         put_unaligned_le16(iu_length, &request->header.iu_length);
5373
5374         return 0;
5375 }
5376
5377 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5378         struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5379         struct pqi_io_request *io_request)
5380 {
5381         u16 iu_length;
5382         int sg_count;
5383         bool chained;
5384         unsigned int num_sg_in_iu;
5385         struct scatterlist *sg;
5386         struct pqi_sg_descriptor *sg_descriptor;
5387
5388         sg_count = scsi_dma_map(scmd);
5389         if (sg_count < 0)
5390                 return sg_count;
5391
5392         iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5393                 PQI_REQUEST_HEADER_LENGTH;
5394         num_sg_in_iu = 0;
5395
5396         if (sg_count == 0)
5397                 goto out;
5398
5399         sg = scsi_sglist(scmd);
5400         sg_descriptor = request->sg_descriptors;
5401
5402         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5403                 ctrl_info->max_sg_per_iu, &chained);
5404
5405         request->partial = chained;
5406         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5407
5408 out:
5409         put_unaligned_le16(iu_length, &request->header.iu_length);
5410         request->num_sg_descriptors = num_sg_in_iu;
5411
5412         return 0;
5413 }
5414
5415 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5416         struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5417         struct pqi_io_request *io_request)
5418 {
5419         u16 iu_length;
5420         int sg_count;
5421         bool chained;
5422         unsigned int num_sg_in_iu;
5423         struct scatterlist *sg;
5424         struct pqi_sg_descriptor *sg_descriptor;
5425
5426         sg_count = scsi_dma_map(scmd);
5427         if (sg_count < 0)
5428                 return sg_count;
5429
5430         iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5431                 PQI_REQUEST_HEADER_LENGTH;
5432         num_sg_in_iu = 0;
5433
5434         if (sg_count != 0) {
5435                 sg = scsi_sglist(scmd);
5436                 sg_descriptor = request->sg_descriptors;
5437
5438                 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5439                         ctrl_info->max_sg_per_r56_iu, &chained);
5440
5441                 request->partial = chained;
5442                 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5443         }
5444
5445         put_unaligned_le16(iu_length, &request->header.iu_length);
5446         request->num_sg_descriptors = num_sg_in_iu;
5447
5448         return 0;
5449 }
5450
5451 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5452         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5453         struct pqi_io_request *io_request)
5454 {
5455         u16 iu_length;
5456         int sg_count;
5457         bool chained;
5458         unsigned int num_sg_in_iu;
5459         struct scatterlist *sg;
5460         struct pqi_sg_descriptor *sg_descriptor;
5461
5462         sg_count = scsi_dma_map(scmd);
5463         if (sg_count < 0)
5464                 return sg_count;
5465
5466         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5467                 PQI_REQUEST_HEADER_LENGTH;
5468         num_sg_in_iu = 0;
5469
5470         if (sg_count == 0)
5471                 goto out;
5472
5473         sg = scsi_sglist(scmd);
5474         sg_descriptor = request->sg_descriptors;
5475
5476         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5477                 ctrl_info->max_sg_per_iu, &chained);
5478
5479         request->partial = chained;
5480         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5481
5482 out:
5483         put_unaligned_le16(iu_length, &request->header.iu_length);
5484         request->num_sg_descriptors = num_sg_in_iu;
5485
5486         return 0;
5487 }
5488
5489 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5490         void *context)
5491 {
5492         struct scsi_cmnd *scmd;
5493
5494         scmd = io_request->scmd;
5495         pqi_free_io_request(io_request);
5496         scsi_dma_unmap(scmd);
5497         pqi_scsi_done(scmd);
5498 }
5499
5500 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5501         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5502         struct pqi_queue_group *queue_group, bool io_high_prio)
5503 {
5504         int rc;
5505         size_t cdb_length;
5506         struct pqi_io_request *io_request;
5507         struct pqi_raid_path_request *request;
5508
5509         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5510         if (!io_request)
5511                 return SCSI_MLQUEUE_HOST_BUSY;
5512
5513         io_request->io_complete_callback = pqi_raid_io_complete;
5514         io_request->scmd = scmd;
5515
5516         request = io_request->iu;
5517         memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5518
5519         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5520         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5521         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5522         request->command_priority = io_high_prio;
5523         put_unaligned_le16(io_request->index, &request->request_id);
5524         request->error_index = request->request_id;
5525         memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5526         request->ml_device_lun_number = (u8)scmd->device->lun;
5527
5528         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5529         memcpy(request->cdb, scmd->cmnd, cdb_length);
5530
5531         switch (cdb_length) {
5532         case 6:
5533         case 10:
5534         case 12:
5535         case 16:
5536                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5537                 break;
5538         case 20:
5539                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5540                 break;
5541         case 24:
5542                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5543                 break;
5544         case 28:
5545                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5546                 break;
5547         case 32:
5548         default:
5549                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5550                 break;
5551         }
5552
5553         switch (scmd->sc_data_direction) {
5554         case DMA_FROM_DEVICE:
5555                 request->data_direction = SOP_READ_FLAG;
5556                 break;
5557         case DMA_TO_DEVICE:
5558                 request->data_direction = SOP_WRITE_FLAG;
5559                 break;
5560         case DMA_NONE:
5561                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5562                 break;
5563         case DMA_BIDIRECTIONAL:
5564                 request->data_direction = SOP_BIDIRECTIONAL;
5565                 break;
5566         default:
5567                 dev_err(&ctrl_info->pci_dev->dev,
5568                         "unknown data direction: %d\n",
5569                         scmd->sc_data_direction);
5570                 break;
5571         }
5572
5573         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5574         if (rc) {
5575                 pqi_free_io_request(io_request);
5576                 return SCSI_MLQUEUE_HOST_BUSY;
5577         }
5578
5579         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5580
5581         return 0;
5582 }
5583
5584 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5585         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5586         struct pqi_queue_group *queue_group)
5587 {
5588         bool io_high_prio;
5589
5590         io_high_prio = pqi_is_io_high_priority(device, scmd);
5591
5592         return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5593 }
5594
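/*
 * Decide whether a failed RAID-bypass (AIO) request should be retried
 * through the normal RAID path instead of being completed with an error.
 */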
5595 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5596 {
5597         struct scsi_cmnd *scmd;
5598         struct pqi_scsi_dev *device;
5599         struct pqi_ctrl_info *ctrl_info;
5600
5601         if (!io_request->raid_bypass)
5602                 return false;
5603
5604         scmd = io_request->scmd;
5605         if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5606                 return false;
5607         if (host_byte(scmd->result) == DID_NO_CONNECT)
5608                 return false;
5609
5610         device = scmd->device->hostdata;
5611         if (pqi_device_offline(device) || pqi_device_in_remove(device))
5612                 return false;
5613
5614         ctrl_info = shost_to_hba(scmd->device->host);
5615         if (pqi_ctrl_offline(ctrl_info))
5616                 return false;
5617
5618         return true;
5619 }
5620
5621 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5622         void *context)
5623 {
5624         struct scsi_cmnd *scmd;
5625
5626         scmd = io_request->scmd;
5627         scsi_dma_unmap(scmd);
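        /*
         * If the request came back with -EAGAIN, or it was a RAID-bypass
         * attempt that failed, ask the midlayer to retry it immediately;
         * bumping this_residual makes pqi_is_bypass_eligible_request()
         * send the retry down the RAID path.
         */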
5628         if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5629                 set_host_byte(scmd, DID_IMM_RETRY);
5630                 pqi_cmd_priv(scmd)->this_residual++;
5631         }
5632
5633         pqi_free_io_request(io_request);
5634         pqi_scsi_done(scmd);
5635 }
5636
5637 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5638         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5639         struct pqi_queue_group *queue_group)
5640 {
5641         bool io_high_prio;
5642
5643         io_high_prio = pqi_is_io_high_priority(device, scmd);
5644
5645         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5646                 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5647                 false, io_high_prio);
5648 }
5649
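/*
 * Submit a command on the AIO (accelerated I/O) path: the command is sent
 * directly to the device identified by aio_handle, bypassing the controller's
 * RAID firmware stack.
 */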
5650 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5651         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5652         unsigned int cdb_length, struct pqi_queue_group *queue_group,
5653         struct pqi_encryption_info *encryption_info, bool raid_bypass,
5654         bool io_high_prio)
5655 {
5656         int rc;
5657         struct pqi_io_request *io_request;
5658         struct pqi_aio_path_request *request;
5659
5660         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5661         if (!io_request)
5662                 return SCSI_MLQUEUE_HOST_BUSY;
5663
5664         io_request->io_complete_callback = pqi_aio_io_complete;
5665         io_request->scmd = scmd;
5666         io_request->raid_bypass = raid_bypass;
5667
5668         request = io_request->iu;
5669         memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5670
5671         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5672         put_unaligned_le32(aio_handle, &request->nexus_id);
5673         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5674         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5675         request->command_priority = io_high_prio;
5676         put_unaligned_le16(io_request->index, &request->request_id);
5677         request->error_index = request->request_id;
5678         if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5679                 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5680         if (cdb_length > sizeof(request->cdb))
5681                 cdb_length = sizeof(request->cdb);
5682         request->cdb_length = cdb_length;
5683         memcpy(request->cdb, cdb, cdb_length);
5684
5685         switch (scmd->sc_data_direction) {
5686         case DMA_TO_DEVICE:
5687                 request->data_direction = SOP_READ_FLAG;
5688                 break;
5689         case DMA_FROM_DEVICE:
5690                 request->data_direction = SOP_WRITE_FLAG;
5691                 break;
5692         case DMA_NONE:
5693                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5694                 break;
5695         case DMA_BIDIRECTIONAL:
5696                 request->data_direction = SOP_BIDIRECTIONAL;
5697                 break;
5698         default:
5699                 dev_err(&ctrl_info->pci_dev->dev,
5700                         "unknown data direction: %d\n",
5701                         scmd->sc_data_direction);
5702                 break;
5703         }
5704
5705         if (encryption_info) {
5706                 request->encryption_enable = true;
5707                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5708                         &request->data_encryption_key_index);
5709                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5710                         &request->encrypt_tweak_lower);
5711                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5712                         &request->encrypt_tweak_upper);
5713         }
5714
5715         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5716         if (rc) {
5717                 pqi_free_io_request(io_request);
5718                 return SCSI_MLQUEUE_HOST_BUSY;
5719         }
5720
5721         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5722
5723         return 0;
5724 }
5725
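/*
 * Submit a RAID 1 write on the AIO path: the write is issued directly to the
 * two or three mirror legs described by rmd->it_nexus[].
 */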
5726 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5727         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5728         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5729         struct pqi_scsi_dev_raid_map_data *rmd)
5730 {
5731         int rc;
5732         struct pqi_io_request *io_request;
5733         struct pqi_aio_r1_path_request *r1_request;
5734
5735         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5736         if (!io_request)
5737                 return SCSI_MLQUEUE_HOST_BUSY;
5738
5739         io_request->io_complete_callback = pqi_aio_io_complete;
5740         io_request->scmd = scmd;
5741         io_request->raid_bypass = true;
5742
5743         r1_request = io_request->iu;
5744         memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5745
5746         r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5747         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5748         r1_request->num_drives = rmd->num_it_nexus_entries;
5749         put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5750         put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5751         if (rmd->num_it_nexus_entries == 3)
5752                 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5753
5754         put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5755         r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5756         put_unaligned_le16(io_request->index, &r1_request->request_id);
5757         r1_request->error_index = r1_request->request_id;
5758         if (rmd->cdb_length > sizeof(r1_request->cdb))
5759                 rmd->cdb_length = sizeof(r1_request->cdb);
5760         r1_request->cdb_length = rmd->cdb_length;
5761         memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5762
5763         /* The direction is always write; the AIO IUs use the inverse flag sense, so this is SOP_READ_FLAG (see pqi_aio_submit_io()). */
5764         r1_request->data_direction = SOP_READ_FLAG;
5765
5766         if (encryption_info) {
5767                 r1_request->encryption_enable = true;
5768                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5769                                 &r1_request->data_encryption_key_index);
5770                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5771                                 &r1_request->encrypt_tweak_lower);
5772                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5773                                 &r1_request->encrypt_tweak_upper);
5774         }
5775
5776         rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5777         if (rc) {
5778                 pqi_free_io_request(io_request);
5779                 return SCSI_MLQUEUE_HOST_BUSY;
5780         }
5781
5782         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5783
5784         return 0;
5785 }
5786
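/*
 * Submit a RAID 5/6 write on the AIO path: the IU carries the data nexus plus
 * the P (and, for RAID 6, Q) parity nexus so the controller can update parity.
 */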
5787 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5788         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5789         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5790         struct pqi_scsi_dev_raid_map_data *rmd)
5791 {
5792         int rc;
5793         struct pqi_io_request *io_request;
5794         struct pqi_aio_r56_path_request *r56_request;
5795
5796         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5797         if (!io_request)
5798                 return SCSI_MLQUEUE_HOST_BUSY;
5799         io_request->io_complete_callback = pqi_aio_io_complete;
5800         io_request->scmd = scmd;
5801         io_request->raid_bypass = true;
5802
5803         r56_request = io_request->iu;
5804         memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5805
5806         if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5807                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5808         else
5809                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5810
5811         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5812         put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5813         put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5814         if (rmd->raid_level == SA_RAID_6) {
5815                 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5816                 r56_request->xor_multiplier = rmd->xor_mult;
5817         }
5818         put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5819         r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5820         put_unaligned_le64(rmd->row, &r56_request->row);
5821
5822         put_unaligned_le16(io_request->index, &r56_request->request_id);
5823         r56_request->error_index = r56_request->request_id;
5824
5825         if (rmd->cdb_length > sizeof(r56_request->cdb))
5826                 rmd->cdb_length = sizeof(r56_request->cdb);
5827         r56_request->cdb_length = rmd->cdb_length;
5828         memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5829
5830         /* The direction is always write; the AIO IUs use the inverse flag sense, so this is SOP_READ_FLAG (see pqi_aio_submit_io()). */
5831         r56_request->data_direction = SOP_READ_FLAG;
5832
5833         if (encryption_info) {
5834                 r56_request->encryption_enable = true;
5835                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5836                                 &r56_request->data_encryption_key_index);
5837                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5838                                 &r56_request->encrypt_tweak_lower);
5839                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5840                                 &r56_request->encrypt_tweak_upper);
5841         }
5842
5843         rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5844         if (rc) {
5845                 pqi_free_io_request(io_request);
5846                 return SCSI_MLQUEUE_HOST_BUSY;
5847         }
5848
5849         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5850
5851         return 0;
5852 }
5853
5854 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5855         struct scsi_cmnd *scmd)
5856 {
5857         /*
5858          * host_tagset is set to 1 during init, so the block-layer tag already encodes the hardware queue; extract it here.
5859          */
5860         return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5861 }
5862
5863 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5864 {
5865         if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5866                 return false;
5867
5868         return pqi_cmd_priv(scmd)->this_residual == 0;
5869 }
5870
5871 /*
5872  * This function gets called just before we hand the completed SCSI request
5873  * back to the SCSI midlayer (SML).
5874  */
5875
5876 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5877 {
5878         struct pqi_scsi_dev *device;
5879         struct completion *wait;
5880
5881         if (!scmd->device) {
5882                 set_host_byte(scmd, DID_NO_CONNECT);
5883                 return;
5884         }
5885
5886         device = scmd->device->hostdata;
5887         if (!device) {
5888                 set_host_byte(scmd, DID_NO_CONNECT);
5889                 return;
5890         }
5891
5892         atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5893
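        /*
         * host_scribble normally holds PQI_NO_COMPLETION;
         * pqi_eh_abort_handler() may have swapped in a completion to be
         * signaled when the command finishes.
         */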
5894         wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5895         if (wait != PQI_NO_COMPLETION)
5896                 complete(wait);
5897 }
5898
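/*
 * Detect sequential (streaming) write patterns to RAID 5/6 volumes. Returning
 * true tells the caller to skip the AIO bypass and send the write down the
 * RAID path.
 */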
5899 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5900         struct scsi_cmnd *scmd)
5901 {
5902         u32 oldest_jiffies;
5903         u8 lru_index;
5904         int i;
5905         int rc;
5906         struct pqi_scsi_dev *device;
5907         struct pqi_stream_data *pqi_stream_data;
5908         struct pqi_scsi_dev_raid_map_data rmd;
5909
5910         if (!ctrl_info->enable_stream_detection)
5911                 return false;
5912
5913         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5914         if (rc)
5915                 return false;
5916
5917         /* Check writes only. */
5918         if (!rmd.is_write)
5919                 return false;
5920
5921         device = scmd->device->hostdata;
5922
5923         /* Check for RAID 5/6 streams. */
5924         if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5925                 return false;
5926
5927         /*
5928          * If the controller does not support AIO RAID 5/6 writes, the request
5929          * must be sent down the non-AIO (RAID) path.
5930          */
5931         if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5932                 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5933                 return true;
5934
5935         lru_index = 0;
5936         oldest_jiffies = INT_MAX;
5937         for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5938                 pqi_stream_data = &device->stream_data[i];
5939                 /*
5940                  * Check whether this request is adjacent to, or falls
5941                  * within, the previous request for this stream.
5942                  */
5943                 if ((pqi_stream_data->next_lba &&
5944                         rmd.first_block >= pqi_stream_data->next_lba) &&
5945                         rmd.first_block <= pqi_stream_data->next_lba +
5946                                 rmd.block_cnt) {
5947                         pqi_stream_data->next_lba = rmd.first_block +
5948                                 rmd.block_cnt;
5949                         pqi_stream_data->last_accessed = jiffies;
5950                         return true;
5951                 }
5952
5953                 /* Unused entry. */
5954                 if (pqi_stream_data->last_accessed == 0) {
5955                         lru_index = i;
5956                         break;
5957                 }
5958
5959                 /* Find entry with oldest last accessed time. */
5960                 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
5961                         oldest_jiffies = pqi_stream_data->last_accessed;
5962                         lru_index = i;
5963                 }
5964         }
5965
5966         /* Set LRU entry. */
5967         pqi_stream_data = &device->stream_data[lru_index];
5968         pqi_stream_data->last_accessed = jiffies;
5969         pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
5970
5971         return false;
5972 }
5973
5974 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5975 {
5976         int rc;
5977         struct pqi_ctrl_info *ctrl_info;
5978         struct pqi_scsi_dev *device;
5979         u16 hw_queue;
5980         struct pqi_queue_group *queue_group;
5981         bool raid_bypassed;
5982         u8 lun;
5983
5984         scmd->host_scribble = PQI_NO_COMPLETION;
5985
5986         device = scmd->device->hostdata;
5987
5988         if (!device) {
5989                 set_host_byte(scmd, DID_NO_CONNECT);
5990                 pqi_scsi_done(scmd);
5991                 return 0;
5992         }
5993
5994         lun = (u8)scmd->device->lun;
5995
5996         atomic_inc(&device->scsi_cmds_outstanding[lun]);
5997
5998         ctrl_info = shost_to_hba(shost);
5999
6000         if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
6001                 set_host_byte(scmd, DID_NO_CONNECT);
6002                 pqi_scsi_done(scmd);
6003                 return 0;
6004         }
6005
6006         if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6007                 rc = SCSI_MLQUEUE_HOST_BUSY;
6008                 goto out;
6009         }
6010
6011         /*
6012          * This is necessary because the SML doesn't zero out this field during
6013          * error recovery.
6014          */
6015         scmd->result = 0;
6016
6017         hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6018         queue_group = &ctrl_info->queue_groups[hw_queue];
6019
6020         if (pqi_is_logical_device(device)) {
6021                 raid_bypassed = false;
6022                 if (device->raid_bypass_enabled &&
6023                         pqi_is_bypass_eligible_request(scmd) &&
6024                         !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6025                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6026                         if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6027                                 raid_bypassed = true;
6028                                 device->raid_bypass_cnt++;
6029                         }
6030                 }
6031                 if (!raid_bypassed)
6032                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6033         } else {
6034                 if (device->aio_enabled)
6035                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6036                 else
6037                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6038         }
6039
6040 out:
6041         if (rc) {
6042                 scmd->host_scribble = NULL;
6043                 atomic_dec(&device->scsi_cmds_outstanding[lun]);
6044         }
6045
6046         return rc;
6047 }
6048
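/*
 * Count I/O requests still sitting on the per-queue-group submit lists,
 * i.e. queued in the driver but not yet posted to the controller.
 */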
6049 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6050 {
6051         unsigned int i;
6052         unsigned int path;
6053         unsigned long flags;
6054         unsigned int queued_io_count;
6055         struct pqi_queue_group *queue_group;
6056         struct pqi_io_request *io_request;
6057
6058         queued_io_count = 0;
6059
6060         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6061                 queue_group = &ctrl_info->queue_groups[i];
6062                 for (path = 0; path < 2; path++) {
6063                         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6064                         list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6065                                 queued_io_count++;
6066                         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6067                 }
6068         }
6069
6070         return queued_io_count;
6071 }
6072
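/*
 * Count inbound queues whose producer index differs from the consumer index,
 * i.e. queues with requests the controller has not yet consumed.
 */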
6073 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6074 {
6075         unsigned int i;
6076         unsigned int path;
6077         unsigned int nonempty_inbound_queue_count;
6078         struct pqi_queue_group *queue_group;
6079         pqi_index_t iq_pi;
6080         pqi_index_t iq_ci;
6081
6082         nonempty_inbound_queue_count = 0;
6083
6084         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6085                 queue_group = &ctrl_info->queue_groups[i];
6086                 for (path = 0; path < 2; path++) {
6087                         iq_pi = queue_group->iq_pi_copy[path];
6088                         iq_ci = readl(queue_group->iq_ci[path]);
6089                         if (iq_ci != iq_pi)
6090                                 nonempty_inbound_queue_count++;
6091                 }
6092         }
6093
6094         return nonempty_inbound_queue_count;
6095 }
6096
6097 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS        10
6098
6099 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6100 {
6101         unsigned long start_jiffies;
6102         unsigned long warning_timeout;
6103         unsigned int queued_io_count;
6104         unsigned int nonempty_inbound_queue_count;
6105         bool displayed_warning;
6106
6107         displayed_warning = false;
6108         start_jiffies = jiffies;
6109         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6110
6111         while (1) {
6112                 queued_io_count = pqi_queued_io_count(ctrl_info);
6113                 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6114                 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6115                         break;
6116                 pqi_check_ctrl_health(ctrl_info);
6117                 if (pqi_ctrl_offline(ctrl_info))
6118                         return -ENXIO;
6119                 if (time_after(jiffies, warning_timeout)) {
6120                         dev_warn(&ctrl_info->pci_dev->dev,
6121                                 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6122                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6123                         displayed_warning = true;
6124                         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6125                 }
6126                 usleep_range(1000, 2000);
6127         }
6128
6129         if (displayed_warning)
6130                 dev_warn(&ctrl_info->pci_dev->dev,
6131                         "queued I/O drained after waiting for %u seconds\n",
6132                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6133
6134         return 0;
6135 }
6136
6137 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6138         struct pqi_scsi_dev *device, u8 lun)
6139 {
6140         unsigned int i;
6141         unsigned int path;
6142         struct pqi_queue_group *queue_group;
6143         unsigned long flags;
6144         struct pqi_io_request *io_request;
6145         struct pqi_io_request *next;
6146         struct scsi_cmnd *scmd;
6147         struct pqi_scsi_dev *scsi_device;
6148
6149         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6150                 queue_group = &ctrl_info->queue_groups[i];
6151
6152                 for (path = 0; path < 2; path++) {
6153                         spin_lock_irqsave(
6154                                 &queue_group->submit_lock[path], flags);
6155
6156                         list_for_each_entry_safe(io_request, next,
6157                                 &queue_group->request_list[path],
6158                                 request_list_entry) {
6159
6160                                 scmd = io_request->scmd;
6161                                 if (!scmd)
6162                                         continue;
6163
6164                                 scsi_device = scmd->device->hostdata;
6165                                 if (scsi_device != device)
6166                                         continue;
6167
6168                                 if ((u8)scmd->device->lun != lun)
6169                                         continue;
6170
6171                                 list_del(&io_request->request_list_entry);
6172                                 set_host_byte(scmd, DID_RESET);
6173                                 pqi_free_io_request(io_request);
6174                                 scsi_dma_unmap(scmd);
6175                                 pqi_scsi_done(scmd);
6176                         }
6177
6178                         spin_unlock_irqrestore(
6179                                 &queue_group->submit_lock[path], flags);
6180                 }
6181         }
6182 }
6183
6184 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS     10
6185
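/*
 * Poll until all outstanding commands for this LUN have completed, warning
 * every PQI_PENDING_IO_WARNING_TIMEOUT_SECS and giving up after timeout_msecs.
 */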
6186 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6187         struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6188 {
6189         int cmds_outstanding;
6190         unsigned long start_jiffies;
6191         unsigned long warning_timeout;
6192         unsigned long msecs_waiting;
6193
6194         start_jiffies = jiffies;
6195         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6196
6197         while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6198                 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6199                         pqi_check_ctrl_health(ctrl_info);
6200                         if (pqi_ctrl_offline(ctrl_info))
6201                                 return -ENXIO;
6202                 }
6203                 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6204                 if (msecs_waiting >= timeout_msecs) {
6205                         dev_err(&ctrl_info->pci_dev->dev,
6206                                 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6207                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6208                                 lun, msecs_waiting / 1000, cmds_outstanding);
6209                         return -ETIMEDOUT;
6210                 }
6211                 if (time_after(jiffies, warning_timeout)) {
6212                         dev_warn(&ctrl_info->pci_dev->dev,
6213                                 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6214                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6215                                 lun, msecs_waiting / 1000, cmds_outstanding);
6216                         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6217                 }
6218                 usleep_range(1000, 2000);
6219         }
6220
6221         return 0;
6222 }
6223
6224 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6225         void *context)
6226 {
6227         struct completion *waiting = context;
6228
6229         complete(waiting);
6230 }
6231
6232 #define PQI_LUN_RESET_POLL_COMPLETION_SECS      10
6233
6234 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6235         struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6236 {
6237         int rc;
6238         unsigned int wait_secs;
6239         int cmds_outstanding;
6240
6241         wait_secs = 0;
6242
6243         while (1) {
6244                 if (wait_for_completion_io_timeout(wait,
6245                         PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6246                         rc = 0;
6247                         break;
6248                 }
6249
6250                 pqi_check_ctrl_health(ctrl_info);
6251                 if (pqi_ctrl_offline(ctrl_info)) {
6252                         rc = -ENXIO;
6253                         break;
6254                 }
6255
6256                 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6257                 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6258                 dev_warn(&ctrl_info->pci_dev->dev,
6259                         "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6260                         ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6261         }
6262
6263         return rc;
6264 }
6265
6266 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS     30
6267
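/*
 * Build a task-management IU requesting a LUN reset, post it on the RAID path,
 * and wait for the firmware to complete it.
 */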
6268 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6269 {
6270         int rc;
6271         struct pqi_io_request *io_request;
6272         DECLARE_COMPLETION_ONSTACK(wait);
6273         struct pqi_task_management_request *request;
6274
6275         io_request = pqi_alloc_io_request(ctrl_info, NULL);
6276         io_request->io_complete_callback = pqi_lun_reset_complete;
6277         io_request->context = &wait;
6278
6279         request = io_request->iu;
6280         memset(request, 0, sizeof(*request));
6281
6282         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6283         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6284                 &request->header.iu_length);
6285         put_unaligned_le16(io_request->index, &request->request_id);
6286         memcpy(request->lun_number, device->scsi3addr,
6287                 sizeof(request->lun_number));
6288         if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6289                 request->ml_device_lun_number = lun;
6290         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6291         if (ctrl_info->tmf_iu_timeout_supported)
6292                 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6293
6294         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6295                 io_request);
6296
6297         rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6298         if (rc == 0)
6299                 rc = io_request->status;
6300
6301         pqi_free_io_request(io_request);
6302
6303         return rc;
6304 }
6305
6306 #define PQI_LUN_RESET_RETRIES                           3
6307 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS              (10 * 1000)
6308 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS          (10 * 60 * 1000)
6309 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS   (2 * 60 * 1000)
6310
6311 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6312 {
6313         int reset_rc;
6314         int wait_rc;
6315         unsigned int retries;
6316         unsigned long timeout_msecs;
6317
6318         for (retries = 0;;) {
6319                 reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6320                 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6321                         break;
6322                 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6323         }
6324
6325         timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6326                 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6327
6328         wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6329         if (wait_rc && reset_rc == 0)
6330                 reset_rc = wait_rc;
6331
6332         return reset_rc == 0 ? SUCCESS : FAILED;
6333 }
6334
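/*
 * Quiesce the controller, fail any I/O still queued in the driver for this
 * device, drain the inbound queues, then issue the LUN reset with retries.
 */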
6335 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6336 {
6337         int rc;
6338
6339         pqi_ctrl_block_requests(ctrl_info);
6340         pqi_ctrl_wait_until_quiesced(ctrl_info);
6341         pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6342         rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6343         pqi_device_reset_start(device, lun);
6344         pqi_ctrl_unblock_requests(ctrl_info);
6345         if (rc)
6346                 rc = FAILED;
6347         else
6348                 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6349         pqi_device_reset_done(device, lun);
6350
6351         return rc;
6352 }
6353
6354 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6355 {
6356         int rc;
6357
6358         mutex_lock(&ctrl_info->lun_reset_mutex);
6359
6360         dev_err(&ctrl_info->pci_dev->dev,
6361                 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6362                 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6363
6364         pqi_check_ctrl_health(ctrl_info);
6365         if (pqi_ctrl_offline(ctrl_info))
6366                 rc = FAILED;
6367         else
6368                 rc = pqi_device_reset(ctrl_info, device, lun);
6369
6370         dev_err(&ctrl_info->pci_dev->dev,
6371                 "reset of scsi %d:%d:%d:%u: %s\n",
6372                 ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6373                 rc == SUCCESS ? "SUCCESS" : "FAILED");
6374
6375         mutex_unlock(&ctrl_info->lun_reset_mutex);
6376
6377         return rc;
6378 }
6379
6380 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6381 {
6382         struct Scsi_Host *shost;
6383         struct pqi_ctrl_info *ctrl_info;
6384         struct pqi_scsi_dev *device;
6385         u8 scsi_opcode;
6386
6387         shost = scmd->device->host;
6388         ctrl_info = shost_to_hba(shost);
6389         device = scmd->device->hostdata;
6390         scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6391
6392         return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6393 }
6394
6395 static void pqi_tmf_worker(struct work_struct *work)
6396 {
6397         struct pqi_tmf_work *tmf_work;
6398         struct scsi_cmnd *scmd;
6399
6400         tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6401         scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6402
6403         pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6404 }
6405
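/*
 * Abort handler: rather than issuing a task abort to the firmware, schedule a
 * LUN reset via pqi_tmf_worker() and wait for the command to be completed.
 */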
6406 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6407 {
6408         struct Scsi_Host *shost;
6409         struct pqi_ctrl_info *ctrl_info;
6410         struct pqi_scsi_dev *device;
6411         struct pqi_tmf_work *tmf_work;
6412         DECLARE_COMPLETION_ONSTACK(wait);
6413
6414         shost = scmd->device->host;
6415         ctrl_info = shost_to_hba(shost);
6416         device = scmd->device->hostdata;
6417
6418         dev_err(&ctrl_info->pci_dev->dev,
6419                 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6420                 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6421
6422         if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6423                 dev_err(&ctrl_info->pci_dev->dev,
6424                         "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6425                         shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6426                 scmd->result = DID_RESET << 16;
6427                 goto out;
6428         }
6429
6430         tmf_work = &device->tmf_work[scmd->device->lun];
6431
6432         if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6433                 tmf_work->ctrl_info = ctrl_info;
6434                 tmf_work->device = device;
6435                 tmf_work->lun = (u8)scmd->device->lun;
6436                 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6437                 schedule_work(&tmf_work->work_struct);
6438         }
6439
6440         wait_for_completion(&wait);
6441
6442         dev_err(&ctrl_info->pci_dev->dev,
6443                 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6444                 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6445
6446 out:
6447
6448         return SUCCESS;
6449 }
6450
6451 static int pqi_slave_alloc(struct scsi_device *sdev)
6452 {
6453         struct pqi_scsi_dev *device;
6454         unsigned long flags;
6455         struct pqi_ctrl_info *ctrl_info;
6456         struct scsi_target *starget;
6457         struct sas_rphy *rphy;
6458
6459         ctrl_info = shost_to_hba(sdev->host);
6460
6461         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6462
6463         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6464                 starget = scsi_target(sdev);
6465                 rphy = target_to_rphy(starget);
6466                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6467                 if (device) {
6468                         if (device->target_lun_valid) {
6469                                 device->ignore_device = true;
6470                         } else {
6471                                 device->target = sdev_id(sdev);
6472                                 device->lun = sdev->lun;
6473                                 device->target_lun_valid = true;
6474                         }
6475                 }
6476         } else {
6477                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6478                         sdev_id(sdev), sdev->lun);
6479         }
6480
6481         if (device) {
6482                 sdev->hostdata = device;
6483                 device->sdev = sdev;
6484                 if (device->queue_depth) {
6485                         device->advertised_queue_depth = device->queue_depth;
6486                         scsi_change_queue_depth(sdev,
6487                                 device->advertised_queue_depth);
6488                 }
6489                 if (pqi_is_logical_device(device)) {
6490                         pqi_disable_write_same(sdev);
6491                 } else {
6492                         sdev->allow_restart = 1;
6493                         if (device->device_type == SA_DEVICE_TYPE_NVME)
6494                                 pqi_disable_write_same(sdev);
6495                 }
6496         }
6497
6498         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6499
6500         return 0;
6501 }
6502
6503 static void pqi_map_queues(struct Scsi_Host *shost)
6504 {
6505         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6506
6507         blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6508                               ctrl_info->pci_dev, 0);
6509 }
6510
6511 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6512 {
6513         return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6514 }
6515
6516 static int pqi_slave_configure(struct scsi_device *sdev)
6517 {
6518         int rc = 0;
6519         struct pqi_scsi_dev *device;
6520
6521         device = sdev->hostdata;
6522         device->devtype = sdev->type;
6523
6524         if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6525                 rc = -ENXIO;
6526                 device->ignore_device = false;
6527         }
6528
6529         return rc;
6530 }
6531
6532 static void pqi_slave_destroy(struct scsi_device *sdev)
6533 {
6534         struct pqi_ctrl_info *ctrl_info;
6535         struct pqi_scsi_dev *device;
6536         int mutex_acquired;
6537         unsigned long flags;
6538
6539         ctrl_info = shost_to_hba(sdev->host);
6540
6541         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6542         if (!mutex_acquired)
6543                 return;
6544
6545         device = sdev->hostdata;
6546         if (!device) {
6547                 mutex_unlock(&ctrl_info->scan_mutex);
6548                 return;
6549         }
6550
6551         device->lun_count--;
6552         if (device->lun_count > 0) {
6553                 mutex_unlock(&ctrl_info->scan_mutex);
6554                 return;
6555         }
6556
6557         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6558         list_del(&device->scsi_device_list_entry);
6559         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6560
6561         mutex_unlock(&ctrl_info->scan_mutex);
6562
6563         pqi_dev_info(ctrl_info, "removed", device);
6564         pqi_free_device(device);
6565 }
6566
6567 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6568 {
6569         struct pci_dev *pci_dev;
6570         u32 subsystem_vendor;
6571         u32 subsystem_device;
6572         cciss_pci_info_struct pci_info;
6573
6574         if (!arg)
6575                 return -EINVAL;
6576
6577         pci_dev = ctrl_info->pci_dev;
6578
6579         pci_info.domain = pci_domain_nr(pci_dev->bus);
6580         pci_info.bus = pci_dev->bus->number;
6581         pci_info.dev_fn = pci_dev->devfn;
6582         subsystem_vendor = pci_dev->subsystem_vendor;
6583         subsystem_device = pci_dev->subsystem_device;
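        /* board_id: subsystem device ID in the upper 16 bits, subsystem vendor ID in the lower 16 bits. */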
6584         pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6585
6586         if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6587                 return -EFAULT;
6588
6589         return 0;
6590 }
6591
6592 static int pqi_getdrivver_ioctl(void __user *arg)
6593 {
6594         u32 version;
6595
6596         if (!arg)
6597                 return -EINVAL;
6598
6599         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6600                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6601
6602         if (copy_to_user(arg, &version, sizeof(version)))
6603                 return -EFAULT;
6604
6605         return 0;
6606 }
6607
6608 struct ciss_error_info {
6609         u8      scsi_status;
6610         int     command_status;
6611         size_t  sense_data_length;
6612 };
6613
6614 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6615         struct ciss_error_info *ciss_error_info)
6616 {
6617         int ciss_cmd_status;
6618         size_t sense_data_length;
6619
6620         switch (pqi_error_info->data_out_result) {
6621         case PQI_DATA_IN_OUT_GOOD:
6622                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6623                 break;
6624         case PQI_DATA_IN_OUT_UNDERFLOW:
6625                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6626                 break;
6627         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6628                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6629                 break;
6630         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6631         case PQI_DATA_IN_OUT_BUFFER_ERROR:
6632         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6633         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6634         case PQI_DATA_IN_OUT_ERROR:
6635                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6636                 break;
6637         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6638         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6639         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6640         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6641         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6642         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6643         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6644         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6645         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6646         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6647                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6648                 break;
6649         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6650                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6651                 break;
6652         case PQI_DATA_IN_OUT_ABORTED:
6653                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6654                 break;
6655         case PQI_DATA_IN_OUT_TIMEOUT:
6656                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6657                 break;
6658         default:
6659                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6660                 break;
6661         }
6662
6663         sense_data_length =
6664                 get_unaligned_le16(&pqi_error_info->sense_data_length);
6665         if (sense_data_length == 0)
6666                 sense_data_length =
6667                 get_unaligned_le16(&pqi_error_info->response_data_length);
6668         if (sense_data_length)
6669                 if (sense_data_length > sizeof(pqi_error_info->data))
6670                         sense_data_length = sizeof(pqi_error_info->data);
6671
6672         ciss_error_info->scsi_status = pqi_error_info->status;
6673         ciss_error_info->command_status = ciss_cmd_status;
6674         ciss_error_info->sense_data_length = sense_data_length;
6675 }
6676
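/*
 * CCISS_PASSTHRU ioctl: validate and copy in the user's command, build a
 * RAID-path IU, submit it synchronously, then translate the PQI error
 * information back into the CISS format expected by userspace.
 */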
6677 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6678 {
6679         int rc;
6680         char *kernel_buffer = NULL;
6681         u16 iu_length;
6682         size_t sense_data_length;
6683         IOCTL_Command_struct iocommand;
6684         struct pqi_raid_path_request request;
6685         struct pqi_raid_error_info pqi_error_info;
6686         struct ciss_error_info ciss_error_info;
6687
6688         if (pqi_ctrl_offline(ctrl_info))
6689                 return -ENXIO;
6690         if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6691                 return -EBUSY;
6692         if (!arg)
6693                 return -EINVAL;
6694         if (!capable(CAP_SYS_RAWIO))
6695                 return -EPERM;
6696         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6697                 return -EFAULT;
6698         if (iocommand.buf_size < 1 &&
6699                 iocommand.Request.Type.Direction != XFER_NONE)
6700                 return -EINVAL;
6701         if (iocommand.Request.CDBLen > sizeof(request.cdb))
6702                 return -EINVAL;
6703         if (iocommand.Request.Type.Type != TYPE_CMD)
6704                 return -EINVAL;
6705
6706         switch (iocommand.Request.Type.Direction) {
6707         case XFER_NONE:
6708         case XFER_WRITE:
6709         case XFER_READ:
6710         case XFER_READ | XFER_WRITE:
6711                 break;
6712         default:
6713                 return -EINVAL;
6714         }
6715
6716         if (iocommand.buf_size > 0) {
6717                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6718                 if (!kernel_buffer)
6719                         return -ENOMEM;
6720                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6721                         if (copy_from_user(kernel_buffer, iocommand.buf,
6722                                 iocommand.buf_size)) {
6723                                 rc = -EFAULT;
6724                                 goto out;
6725                         }
6726                 } else {
6727                         memset(kernel_buffer, 0, iocommand.buf_size);
6728                 }
6729         }
6730
6731         memset(&request, 0, sizeof(request));
6732
6733         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6734         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6735                 PQI_REQUEST_HEADER_LENGTH;
6736         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6737                 sizeof(request.lun_number));
6738         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6739         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6740
6741         switch (iocommand.Request.Type.Direction) {
6742         case XFER_NONE:
6743                 request.data_direction = SOP_NO_DIRECTION_FLAG;
6744                 break;
6745         case XFER_WRITE:
6746                 request.data_direction = SOP_WRITE_FLAG;
6747                 break;
6748         case XFER_READ:
6749                 request.data_direction = SOP_READ_FLAG;
6750                 break;
6751         case XFER_READ | XFER_WRITE:
6752                 request.data_direction = SOP_BIDIRECTIONAL;
6753                 break;
6754         }
6755
6756         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6757
6758         if (iocommand.buf_size > 0) {
6759                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6760
6761                 rc = pqi_map_single(ctrl_info->pci_dev,
6762                         &request.sg_descriptors[0], kernel_buffer,
6763                         iocommand.buf_size, DMA_BIDIRECTIONAL);
6764                 if (rc)
6765                         goto out;
6766
6767                 iu_length += sizeof(request.sg_descriptors[0]);
6768         }
6769
6770         put_unaligned_le16(iu_length, &request.header.iu_length);
6771
6772         if (ctrl_info->raid_iu_timeout_supported)
6773                 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6774
6775         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6776                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6777
6778         if (iocommand.buf_size > 0)
6779                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6780                         DMA_BIDIRECTIONAL);
6781
6782         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6783
6784         if (rc == 0) {
6785                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6786                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6787                 iocommand.error_info.CommandStatus =
6788                         ciss_error_info.command_status;
6789                 sense_data_length = ciss_error_info.sense_data_length;
6790                 if (sense_data_length) {
6791                         if (sense_data_length >
6792                                 sizeof(iocommand.error_info.SenseInfo))
6793                                 sense_data_length =
6794                                         sizeof(iocommand.error_info.SenseInfo);
6795                         memcpy(iocommand.error_info.SenseInfo,
6796                                 pqi_error_info.data, sense_data_length);
6797                         iocommand.error_info.SenseLen = sense_data_length;
6798                 }
6799         }
6800
6801         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6802                 rc = -EFAULT;
6803                 goto out;
6804         }
6805
6806         if (rc == 0 && iocommand.buf_size > 0 &&
6807                 (iocommand.Request.Type.Direction & XFER_READ)) {
6808                 if (copy_to_user(iocommand.buf, kernel_buffer,
6809                         iocommand.buf_size)) {
6810                         rc = -EFAULT;
6811                 }
6812         }
6813
6814 out:
6815         kfree(kernel_buffer);
6816
6817         return rc;
6818 }
6819
6820 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6821                      void __user *arg)
6822 {
6823         int rc;
6824         struct pqi_ctrl_info *ctrl_info;
6825
6826         ctrl_info = shost_to_hba(sdev->host);
6827
6828         switch (cmd) {
6829         case CCISS_DEREGDISK:
6830         case CCISS_REGNEWDISK:
6831         case CCISS_REGNEWD:
6832                 rc = pqi_scan_scsi_devices(ctrl_info);
6833                 break;
6834         case CCISS_GETPCIINFO:
6835                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6836                 break;
6837         case CCISS_GETDRIVVER:
6838                 rc = pqi_getdrivver_ioctl(arg);
6839                 break;
6840         case CCISS_PASSTHRU:
6841                 rc = pqi_passthru_ioctl(ctrl_info, arg);
6842                 break;
6843         default:
6844                 rc = -EINVAL;
6845                 break;
6846         }
6847
6848         return rc;
6849 }
6850
6851 static ssize_t pqi_firmware_version_show(struct device *dev,
6852         struct device_attribute *attr, char *buffer)
6853 {
6854         struct Scsi_Host *shost;
6855         struct pqi_ctrl_info *ctrl_info;
6856
6857         shost = class_to_shost(dev);
6858         ctrl_info = shost_to_hba(shost);
6859
6860         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6861 }
6862
6863 static ssize_t pqi_driver_version_show(struct device *dev,
6864         struct device_attribute *attr, char *buffer)
6865 {
6866         return scnprintf(buffer, PAGE_SIZE, "%s\n", DRIVER_VERSION BUILD_TIMESTAMP);
6867 }
6868
6869 static ssize_t pqi_serial_number_show(struct device *dev,
6870         struct device_attribute *attr, char *buffer)
6871 {
6872         struct Scsi_Host *shost;
6873         struct pqi_ctrl_info *ctrl_info;
6874
6875         shost = class_to_shost(dev);
6876         ctrl_info = shost_to_hba(shost);
6877
6878         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6879 }
6880
6881 static ssize_t pqi_model_show(struct device *dev,
6882         struct device_attribute *attr, char *buffer)
6883 {
6884         struct Scsi_Host *shost;
6885         struct pqi_ctrl_info *ctrl_info;
6886
6887         shost = class_to_shost(dev);
6888         ctrl_info = shost_to_hba(shost);
6889
6890         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6891 }
6892
6893 static ssize_t pqi_vendor_show(struct device *dev,
6894         struct device_attribute *attr, char *buffer)
6895 {
6896         struct Scsi_Host *shost;
6897         struct pqi_ctrl_info *ctrl_info;
6898
6899         shost = class_to_shost(dev);
6900         ctrl_info = shost_to_hba(shost);
6901
6902         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6903 }
6904
6905 static ssize_t pqi_host_rescan_store(struct device *dev,
6906         struct device_attribute *attr, const char *buffer, size_t count)
6907 {
6908         struct Scsi_Host *shost = class_to_shost(dev);
6909
6910         pqi_scan_start(shost);
6911
6912         return count;
6913 }
6914
6915 static ssize_t pqi_lockup_action_show(struct device *dev,
6916         struct device_attribute *attr, char *buffer)
6917 {
6918         int count = 0;
6919         unsigned int i;
6920
6921         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6922                 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6923                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6924                                 "[%s] ", pqi_lockup_actions[i].name);
6925                 else
6926                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6927                                 "%s ", pqi_lockup_actions[i].name);
6928         }
6929
6930         count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6931
6932         return count;
6933 }
6934
6935 static ssize_t pqi_lockup_action_store(struct device *dev,
6936         struct device_attribute *attr, const char *buffer, size_t count)
6937 {
6938         unsigned int i;
6939         char *action_name;
6940         char action_name_buffer[32];
6941
6942         strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6943         action_name = strstrip(action_name_buffer);
6944
6945         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6946                 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6947                         pqi_lockup_action = pqi_lockup_actions[i].action;
6948                         return count;
6949                 }
6950         }
6951
6952         return -EINVAL;
6953 }
6954
6955 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6956         struct device_attribute *attr, char *buffer)
6957 {
6958         struct Scsi_Host *shost = class_to_shost(dev);
6959         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6960
6961         return scnprintf(buffer, 10, "%x\n",
6962                         ctrl_info->enable_stream_detection);
6963 }
6964
6965 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
6966         struct device_attribute *attr, const char *buffer, size_t count)
6967 {
6968         struct Scsi_Host *shost = class_to_shost(dev);
6969         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6970         u8 set_stream_detection = 0;
6971
6972         if (kstrtou8(buffer, 0, &set_stream_detection))
6973                 return -EINVAL;
6974
6975         if (set_stream_detection > 0)
6976                 set_stream_detection = 1;
6977
6978         ctrl_info->enable_stream_detection = set_stream_detection;
6979
6980         return count;
6981 }
6982
6983 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
6984         struct device_attribute *attr, char *buffer)
6985 {
6986         struct Scsi_Host *shost = class_to_shost(dev);
6987         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6988
6989         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
6990 }
6991
6992 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
6993         struct device_attribute *attr, const char *buffer, size_t count)
6994 {
6995         struct Scsi_Host *shost = class_to_shost(dev);
6996         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6997         u8 set_r5_writes = 0;
6998
6999         if (kstrtou8(buffer, 0, &set_r5_writes))
7000                 return -EINVAL;
7001
7002         if (set_r5_writes > 0)
7003                 set_r5_writes = 1;
7004
7005         ctrl_info->enable_r5_writes = set_r5_writes;
7006
7007         return count;
7008 }
7009
7010 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7011         struct device_attribute *attr, char *buffer)
7012 {
7013         struct Scsi_Host *shost = class_to_shost(dev);
7014         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7015
7016         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7017 }
7018
7019 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7020         struct device_attribute *attr, const char *buffer, size_t count)
7021 {
7022         struct Scsi_Host *shost = class_to_shost(dev);
7023         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7024         u8 set_r6_writes = 0;
7025
7026         if (kstrtou8(buffer, 0, &set_r6_writes))
7027                 return -EINVAL;
7028
7029         if (set_r6_writes > 0)
7030                 set_r6_writes = 1;
7031
7032         ctrl_info->enable_r6_writes = set_r6_writes;
7033
7034         return count;
7035 }
7036
7037 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
7038 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7039 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7040 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7041 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7042 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7043 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7044         pqi_lockup_action_store);
7045 static DEVICE_ATTR(enable_stream_detection, 0644,
7046         pqi_host_enable_stream_detection_show,
7047         pqi_host_enable_stream_detection_store);
7048 static DEVICE_ATTR(enable_r5_writes, 0644,
7049         pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7050 static DEVICE_ATTR(enable_r6_writes, 0644,
7051         pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
7052
7053 static struct attribute *pqi_shost_attrs[] = {
7054         &dev_attr_driver_version.attr,
7055         &dev_attr_firmware_version.attr,
7056         &dev_attr_model.attr,
7057         &dev_attr_serial_number.attr,
7058         &dev_attr_vendor.attr,
7059         &dev_attr_rescan.attr,
7060         &dev_attr_lockup_action.attr,
7061         &dev_attr_enable_stream_detection.attr,
7062         &dev_attr_enable_r5_writes.attr,
7063         &dev_attr_enable_r6_writes.attr,
7064         NULL
7065 };
7066
7067 ATTRIBUTE_GROUPS(pqi_shost);
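/*
 * The attributes above are exported by the SCSI midlayer for each controller
 * instance, typically under /sys/class/scsi_host/host<N>/.  Example usage
 * (host number and written values are illustrative):
 *
 *      cat /sys/class/scsi_host/host2/firmware_version
 *      echo 1 > /sys/class/scsi_host/host2/enable_r5_writes
 *      echo panic > /sys/class/scsi_host/host2/lockup_action
 *      echo 1 > /sys/class/scsi_host/host2/rescan
 */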
7068
7069 static ssize_t pqi_unique_id_show(struct device *dev,
7070         struct device_attribute *attr, char *buffer)
7071 {
7072         struct pqi_ctrl_info *ctrl_info;
7073         struct scsi_device *sdev;
7074         struct pqi_scsi_dev *device;
7075         unsigned long flags;
7076         u8 unique_id[16];
7077
7078         sdev = to_scsi_device(dev);
7079         ctrl_info = shost_to_hba(sdev->host);
7080
7081         if (pqi_ctrl_offline(ctrl_info))
7082                 return -ENODEV;
7083
7084         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7085
7086         device = sdev->hostdata;
7087         if (!device) {
7088                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7089                 return -ENODEV;
7090         }
7091
7092         if (device->is_physical_device)
7093                 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7094         else
7095                 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7096
7097         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7098
7099         return scnprintf(buffer, PAGE_SIZE,
7100                 "%02X%02X%02X%02X%02X%02X%02X%02X"
7101                 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7102                 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7103                 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7104                 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7105                 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7106 }
7107
7108 static ssize_t pqi_lunid_show(struct device *dev,
7109         struct device_attribute *attr, char *buffer)
7110 {
7111         struct pqi_ctrl_info *ctrl_info;
7112         struct scsi_device *sdev;
7113         struct pqi_scsi_dev *device;
7114         unsigned long flags;
7115         u8 lunid[8];
7116
7117         sdev = to_scsi_device(dev);
7118         ctrl_info = shost_to_hba(sdev->host);
7119
7120         if (pqi_ctrl_offline(ctrl_info))
7121                 return -ENODEV;
7122
7123         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7124
7125         device = sdev->hostdata;
7126         if (!device) {
7127                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7128                 return -ENODEV;
7129         }
7130
7131         memcpy(lunid, device->scsi3addr, sizeof(lunid));
7132
7133         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7134
7135         return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7136 }
7137
7138 #define MAX_PATHS       8
7139
7140 static ssize_t pqi_path_info_show(struct device *dev,
7141         struct device_attribute *attr, char *buf)
7142 {
7143         struct pqi_ctrl_info *ctrl_info;
7144         struct scsi_device *sdev;
7145         struct pqi_scsi_dev *device;
7146         unsigned long flags;
7147         int i;
7148         int output_len = 0;
7149         u8 box;
7150         u8 bay;
7151         u8 path_map_index;
7152         char *active;
7153         u8 phys_connector[2];
7154
7155         sdev = to_scsi_device(dev);
7156         ctrl_info = shost_to_hba(sdev->host);
7157
7158         if (pqi_ctrl_offline(ctrl_info))
7159                 return -ENODEV;
7160
7161         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7162
7163         device = sdev->hostdata;
7164         if (!device) {
7165                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7166                 return -ENODEV;
7167         }
7168
7169         bay = device->bay;
7170         for (i = 0; i < MAX_PATHS; i++) {
7171                 path_map_index = 1 << i;
7172                 if (i == device->active_path_index)
7173                         active = "Active";
7174                 else if (device->path_map & path_map_index)
7175                         active = "Inactive";
7176                 else
7177                         continue;
7178
7179                 output_len += scnprintf(buf + output_len,
7180                                         PAGE_SIZE - output_len,
7181                                         "[%d:%d:%d:%d] %20.20s ",
7182                                         ctrl_info->scsi_host->host_no,
7183                                         device->bus, device->target,
7184                                         device->lun,
7185                                         scsi_device_type(device->devtype));
7186
7187                 if (device->devtype == TYPE_RAID ||
7188                         pqi_is_logical_device(device))
7189                         goto end_buffer;
7190
7191                 memcpy(&phys_connector, &device->phys_connector[i],
7192                         sizeof(phys_connector));
7193                 if (phys_connector[0] < '0')
7194                         phys_connector[0] = '0';
7195                 if (phys_connector[1] < '0')
7196                         phys_connector[1] = '0';
7197
7198                 output_len += scnprintf(buf + output_len,
7199                                         PAGE_SIZE - output_len,
7200                                         "PORT: %.2s ", phys_connector);
7201
7202                 box = device->box[i];
7203                 if (box != 0 && box != 0xFF)
7204                         output_len += scnprintf(buf + output_len,
7205                                                 PAGE_SIZE - output_len,
7206                                                 "BOX: %hhu ", box);
7207
7208                 if ((device->devtype == TYPE_DISK ||
7209                         device->devtype == TYPE_ZBC) &&
7210                         pqi_expose_device(device))
7211                         output_len += scnprintf(buf + output_len,
7212                                                 PAGE_SIZE - output_len,
7213                                                 "BAY: %hhu ", bay);
7214
7215 end_buffer:
7216                 output_len += scnprintf(buf + output_len,
7217                                         PAGE_SIZE - output_len,
7218                                         "%s\n", active);
7219         }
7220
7221         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7222
7223         return output_len;
7224 }
7225
7226 static ssize_t pqi_sas_address_show(struct device *dev,
7227         struct device_attribute *attr, char *buffer)
7228 {
7229         struct pqi_ctrl_info *ctrl_info;
7230         struct scsi_device *sdev;
7231         struct pqi_scsi_dev *device;
7232         unsigned long flags;
7233         u64 sas_address;
7234
7235         sdev = to_scsi_device(dev);
7236         ctrl_info = shost_to_hba(sdev->host);
7237
7238         if (pqi_ctrl_offline(ctrl_info))
7239                 return -ENODEV;
7240
7241         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7242
7243         device = sdev->hostdata;
7244         if (!device) {
7245                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7246                 return -ENODEV;
7247         }
7248
7249         sas_address = device->sas_address;
7250
7251         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7252
7253         return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7254 }
7255
7256 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7257         struct device_attribute *attr, char *buffer)
7258 {
7259         struct pqi_ctrl_info *ctrl_info;
7260         struct scsi_device *sdev;
7261         struct pqi_scsi_dev *device;
7262         unsigned long flags;
7263
7264         sdev = to_scsi_device(dev);
7265         ctrl_info = shost_to_hba(sdev->host);
7266
7267         if (pqi_ctrl_offline(ctrl_info))
7268                 return -ENODEV;
7269
7270         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7271
7272         device = sdev->hostdata;
7273         if (!device) {
7274                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7275                 return -ENODEV;
7276         }
7277
7278         buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7279         buffer[1] = '\n';
7280         buffer[2] = '\0';
7281
7282         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7283
7284         return 2;
7285 }
7286
7287 static ssize_t pqi_raid_level_show(struct device *dev,
7288         struct device_attribute *attr, char *buffer)
7289 {
7290         struct pqi_ctrl_info *ctrl_info;
7291         struct scsi_device *sdev;
7292         struct pqi_scsi_dev *device;
7293         unsigned long flags;
7294         char *raid_level;
7295
7296         sdev = to_scsi_device(dev);
7297         ctrl_info = shost_to_hba(sdev->host);
7298
7299         if (pqi_ctrl_offline(ctrl_info))
7300                 return -ENODEV;
7301
7302         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7303
7304         device = sdev->hostdata;
7305         if (!device) {
7306                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7307                 return -ENODEV;
7308         }
7309
7310         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7311                 raid_level = pqi_raid_level_to_string(device->raid_level);
7312         else
7313                 raid_level = "N/A";
7314
7315         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7316
7317         return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7318 }
7319
7320 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7321         struct device_attribute *attr, char *buffer)
7322 {
7323         struct pqi_ctrl_info *ctrl_info;
7324         struct scsi_device *sdev;
7325         struct pqi_scsi_dev *device;
7326         unsigned long flags;
7327         unsigned int raid_bypass_cnt;
7328
7329         sdev = to_scsi_device(dev);
7330         ctrl_info = shost_to_hba(sdev->host);
7331
7332         if (pqi_ctrl_offline(ctrl_info))
7333                 return -ENODEV;
7334
7335         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7336
7337         device = sdev->hostdata;
7338         if (!device) {
7339                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7340                 return -ENODEV;
7341         }
7342
7343         raid_bypass_cnt = device->raid_bypass_cnt;
7344
7345         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7346
7347         return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
7348 }
7349
7350 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7351                 struct device_attribute *attr, char *buf)
7352 {
7353         struct pqi_ctrl_info *ctrl_info;
7354         struct scsi_device *sdev;
7355         struct pqi_scsi_dev *device;
7356         unsigned long flags;
7357         int output_len = 0;
7358
7359         sdev = to_scsi_device(dev);
7360         ctrl_info = shost_to_hba(sdev->host);
7361
7362         if (pqi_ctrl_offline(ctrl_info))
7363                 return -ENODEV;
7364
7365         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7366
7367         device = sdev->hostdata;
7368         if (!device) {
7369                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7370                 return -ENODEV;
7371         }
7372
7373         output_len = scnprintf(buf, PAGE_SIZE, "%d\n",
7374                                 device->ncq_prio_enable);
7375         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7376
7377         return output_len;
7378 }
7379
7380 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7381                         struct device_attribute *attr,
7382                         const char *buf, size_t count)
7383 {
7384         struct pqi_ctrl_info *ctrl_info;
7385         struct scsi_device *sdev;
7386         struct pqi_scsi_dev *device;
7387         unsigned long flags;
7388         u8 ncq_prio_enable = 0;
7389
7390         if (kstrtou8(buf, 0, &ncq_prio_enable))
7391                 return -EINVAL;
7392
7393         sdev = to_scsi_device(dev);
7394         ctrl_info = shost_to_hba(sdev->host);
7395
7396         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7397
7398         device = sdev->hostdata;
7399
7400         if (!device) {
7401                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7402                 return -ENODEV;
7403         }
7404
7405         if (!device->ncq_prio_support) {
7406                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7407                 return -EINVAL;
7408         }
7409
7410         device->ncq_prio_enable = ncq_prio_enable;
7411
7412         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7413
7414         return count;
7415 }
7416
7417 static ssize_t pqi_numa_node_show(struct device *dev,
7418         struct device_attribute *attr, char *buffer)
7419 {
7420         struct scsi_device *sdev;
7421         struct pqi_ctrl_info *ctrl_info;
7422
7423         sdev = to_scsi_device(dev);
7424         ctrl_info = shost_to_hba(sdev->host);
7425
7426         return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7427 }
7428
7429 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7430 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7431 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7432 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7433 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7434 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7435 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7436 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7437                 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7438 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7439
7440 static struct attribute *pqi_sdev_attrs[] = {
7441         &dev_attr_lunid.attr,
7442         &dev_attr_unique_id.attr,
7443         &dev_attr_path_info.attr,
7444         &dev_attr_sas_address.attr,
7445         &dev_attr_ssd_smart_path_enabled.attr,
7446         &dev_attr_raid_level.attr,
7447         &dev_attr_raid_bypass_cnt.attr,
7448         &dev_attr_sas_ncq_prio_enable.attr,
7449         &dev_attr_numa_node.attr,
7450         NULL
7451 };
7452
7453 ATTRIBUTE_GROUPS(pqi_sdev);
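/*
 * These per-LUN attributes appear under each SCSI device exposed by the
 * driver, typically /sys/class/scsi_device/<h:c:t:l>/device/ (also reachable
 * through /sys/block/sd<X>/device/).  Example usage (the device address is
 * illustrative):
 *
 *      cat /sys/class/scsi_device/2:0:0:0/device/raid_level
 *      cat /sys/class/scsi_device/2:0:0:0/device/path_info
 *      echo 1 > /sys/class/scsi_device/2:0:0:0/device/sas_ncq_prio_enable
 */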
7454
7455 static const struct scsi_host_template pqi_driver_template = {
7456         .module = THIS_MODULE,
7457         .name = DRIVER_NAME_SHORT,
7458         .proc_name = DRIVER_NAME_SHORT,
7459         .queuecommand = pqi_scsi_queue_command,
7460         .scan_start = pqi_scan_start,
7461         .scan_finished = pqi_scan_finished,
7462         .this_id = -1,
7463         .eh_device_reset_handler = pqi_eh_device_reset_handler,
7464         .eh_abort_handler = pqi_eh_abort_handler,
7465         .ioctl = pqi_ioctl,
7466         .slave_alloc = pqi_slave_alloc,
7467         .slave_configure = pqi_slave_configure,
7468         .slave_destroy = pqi_slave_destroy,
7469         .map_queues = pqi_map_queues,
7470         .sdev_groups = pqi_sdev_groups,
7471         .shost_groups = pqi_shost_groups,
7472         .cmd_size = sizeof(struct pqi_cmd_priv),
7473 };
7474
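/*
 * Allocate a Scsi_Host for this controller, size its limits from the
 * capabilities discovered during initialization, and register it with the
 * SCSI midlayer and the SAS transport layer.
 */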
7475 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7476 {
7477         int rc;
7478         struct Scsi_Host *shost;
7479
7480         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7481         if (!shost) {
7482                 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7483                 return -ENOMEM;
7484         }
7485
7486         shost->io_port = 0;
7487         shost->n_io_port = 0;
7488         shost->this_id = -1;
7489         shost->max_channel = PQI_MAX_BUS;
7490         shost->max_cmd_len = MAX_COMMAND_SIZE;
7491         shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7492         shost->max_id = ~0;
7493         shost->max_sectors = ctrl_info->max_sectors;
7494         shost->can_queue = ctrl_info->scsi_ml_can_queue;
7495         shost->cmd_per_lun = shost->can_queue;
7496         shost->sg_tablesize = ctrl_info->sg_tablesize;
7497         shost->transportt = pqi_sas_transport_template;
7498         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7499         shost->unique_id = shost->irq;
7500         shost->nr_hw_queues = ctrl_info->num_queue_groups;
7501         shost->host_tagset = 1;
7502         shost->hostdata[0] = (unsigned long)ctrl_info;
7503
7504         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7505         if (rc) {
7506                 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7507                 goto free_host;
7508         }
7509
7510         rc = pqi_add_sas_host(shost, ctrl_info);
7511         if (rc) {
7512                 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7513                 goto remove_host;
7514         }
7515
7516         ctrl_info->scsi_host = shost;
7517
7518         return 0;
7519
7520 remove_host:
7521         scsi_remove_host(shost);
7522 free_host:
7523         scsi_host_put(shost);
7524
7525         return rc;
7526 }
7527
7528 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7529 {
7530         struct Scsi_Host *shost;
7531
7532         pqi_delete_sas_host(ctrl_info);
7533
7534         shost = ctrl_info->scsi_host;
7535         if (!shost)
7536                 return;
7537
7538         scsi_remove_host(shost);
7539         scsi_host_put(shost);
7540 }
7541
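/*
 * Poll the PQI device reset register until the controller reports the reset
 * action as completed.  The wait is bounded by the firmware-advertised
 * max_reset_timeout value (in units of 100 milliseconds) and is abandoned
 * early if controller firmware stops running.
 */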
7542 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7543 {
7544         int rc = 0;
7545         struct pqi_device_registers __iomem *pqi_registers;
7546         unsigned long timeout;
7547         unsigned int timeout_msecs;
7548         union pqi_reset_register reset_reg;
7549
7550         pqi_registers = ctrl_info->pqi_registers;
7551         timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7552         timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7553
7554         while (1) {
7555                 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7556                 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7557                 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7558                         break;
7559                 if (!sis_is_firmware_running(ctrl_info)) {
7560                         rc = -ENXIO;
7561                         break;
7562                 }
7563                 if (time_after(jiffies, timeout)) {
7564                         rc = -ETIMEDOUT;
7565                         break;
7566                 }
7567         }
7568
7569         return rc;
7570 }
7571
7572 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7573 {
7574         int rc;
7575         union pqi_reset_register reset_reg;
7576
7577         if (ctrl_info->pqi_reset_quiesce_supported) {
7578                 rc = sis_pqi_reset_quiesce(ctrl_info);
7579                 if (rc) {
7580                         dev_err(&ctrl_info->pci_dev->dev,
7581                                 "PQI reset failed during quiesce with error %d\n", rc);
7582                         return rc;
7583                 }
7584         }
7585
7586         reset_reg.all_bits = 0;
7587         reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7588         reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7589
7590         writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7591
7592         rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7593         if (rc)
7594                 dev_err(&ctrl_info->pci_dev->dev,
7595                         "PQI reset failed with error %d\n", rc);
7596
7597         return rc;
7598 }
7599
7600 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7601 {
7602         int rc;
7603         struct bmic_sense_subsystem_info *sense_info;
7604
7605         sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7606         if (!sense_info)
7607                 return -ENOMEM;
7608
7609         rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7610         if (rc)
7611                 goto out;
7612
7613         memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7614                 sizeof(sense_info->ctrl_serial_number));
7615         ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7616
7617 out:
7618         kfree(sense_info);
7619
7620         return rc;
7621 }
7622
7623 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7624 {
7625         int rc;
7626         struct bmic_identify_controller *identify;
7627
7628         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7629         if (!identify)
7630                 return -ENOMEM;
7631
7632         rc = pqi_identify_controller(ctrl_info, identify);
7633         if (rc)
7634                 goto out;
7635
7636         if (get_unaligned_le32(&identify->extra_controller_flags) &
7637                 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7638                 memcpy(ctrl_info->firmware_version,
7639                         identify->firmware_version_long,
7640                         sizeof(identify->firmware_version_long));
7641         } else {
7642                 memcpy(ctrl_info->firmware_version,
7643                         identify->firmware_version_short,
7644                         sizeof(identify->firmware_version_short));
7645                 ctrl_info->firmware_version
7646                         [sizeof(identify->firmware_version_short)] = '\0';
7647                 snprintf(ctrl_info->firmware_version +
7648                         strlen(ctrl_info->firmware_version),
7649                         sizeof(ctrl_info->firmware_version) -
7650                         sizeof(identify->firmware_version_short),
7651                         "-%u",
7652                         get_unaligned_le16(&identify->firmware_build_number));
7653         }
7654
7655         memcpy(ctrl_info->model, identify->product_id,
7656                 sizeof(identify->product_id));
7657         ctrl_info->model[sizeof(identify->product_id)] = '\0';
7658
7659         memcpy(ctrl_info->vendor, identify->vendor_id,
7660                 sizeof(identify->vendor_id));
7661         ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7662
7663         dev_info(&ctrl_info->pci_dev->dev,
7664                 "Firmware version: %s\n", ctrl_info->firmware_version);
7665
7666 out:
7667         kfree(identify);
7668
7669         return rc;
7670 }
7671
7672 struct pqi_config_table_section_info {
7673         struct pqi_ctrl_info *ctrl_info;
7674         void            *section;
7675         u32             section_offset;
7676         void __iomem    *section_iomem_addr;
7677 };
7678
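/*
 * The firmware features section of the PQI configuration table holds three
 * byte arrays of num_elements bytes each, packed back to back after the
 * num_elements field: features supported (written by firmware), features
 * requested (written by the host), and features enabled (written by firmware
 * once the host signals a configuration table update).  The helpers below
 * index into those arrays by feature bit position.
 */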
7679 static inline bool pqi_is_firmware_feature_supported(
7680         struct pqi_config_table_firmware_features *firmware_features,
7681         unsigned int bit_position)
7682 {
7683         unsigned int byte_index;
7684
7685         byte_index = bit_position / BITS_PER_BYTE;
7686
7687         if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7688                 return false;
7689
7690         return firmware_features->features_supported[byte_index] &
7691                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7692 }
7693
7694 static inline bool pqi_is_firmware_feature_enabled(
7695         struct pqi_config_table_firmware_features *firmware_features,
7696         void __iomem *firmware_features_iomem_addr,
7697         unsigned int bit_position)
7698 {
7699         unsigned int byte_index;
7700         u8 __iomem *features_enabled_iomem_addr;
7701
7702         byte_index = (bit_position / BITS_PER_BYTE) +
7703                 (le16_to_cpu(firmware_features->num_elements) * 2);
7704
7705         features_enabled_iomem_addr = firmware_features_iomem_addr +
7706                 offsetof(struct pqi_config_table_firmware_features,
7707                         features_supported) + byte_index;
7708
7709         return *((__force u8 *)features_enabled_iomem_addr) &
7710                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7711 }
7712
7713 static inline void pqi_request_firmware_feature(
7714         struct pqi_config_table_firmware_features *firmware_features,
7715         unsigned int bit_position)
7716 {
7717         unsigned int byte_index;
7718
7719         byte_index = (bit_position / BITS_PER_BYTE) +
7720                 le16_to_cpu(firmware_features->num_elements);
7721
7722         firmware_features->features_supported[byte_index] |=
7723                 (1 << (bit_position % BITS_PER_BYTE));
7724 }
7725
7726 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7727         u16 first_section, u16 last_section)
7728 {
7729         struct pqi_vendor_general_request request;
7730
7731         memset(&request, 0, sizeof(request));
7732
7733         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7734         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7735                 &request.header.iu_length);
7736         put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7737                 &request.function_code);
7738         put_unaligned_le16(first_section,
7739                 &request.data.config_table_update.first_section);
7740         put_unaligned_le16(last_section,
7741                 &request.data.config_table_update.last_section);
7742
7743         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7744 }
7745
7746 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7747         struct pqi_config_table_firmware_features *firmware_features,
7748         void __iomem *firmware_features_iomem_addr)
7749 {
7750         void *features_requested;
7751         void __iomem *features_requested_iomem_addr;
7752         void __iomem *host_max_known_feature_iomem_addr;
7753
7754         features_requested = firmware_features->features_supported +
7755                 le16_to_cpu(firmware_features->num_elements);
7756
7757         features_requested_iomem_addr = firmware_features_iomem_addr +
7758                 (features_requested - (void *)firmware_features);
7759
7760         memcpy_toio(features_requested_iomem_addr, features_requested,
7761                 le16_to_cpu(firmware_features->num_elements));
7762
7763         if (pqi_is_firmware_feature_supported(firmware_features,
7764                 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7765                 host_max_known_feature_iomem_addr =
7766                         features_requested_iomem_addr +
7767                         (le16_to_cpu(firmware_features->num_elements) * 2) +
7768                         sizeof(__le16);
7769                 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7770                 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7771         }
7772
7773         return pqi_config_table_update(ctrl_info,
7774                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7775                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7776 }
7777
7778 struct pqi_firmware_feature {
7779         char            *feature_name;
7780         unsigned int    feature_bit;
7781         bool            supported;
7782         bool            enabled;
7783         void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7784                 struct pqi_firmware_feature *firmware_feature);
7785 };
7786
7787 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7788         struct pqi_firmware_feature *firmware_feature)
7789 {
7790         if (!firmware_feature->supported) {
7791                 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7792                         firmware_feature->feature_name);
7793                 return;
7794         }
7795
7796         if (firmware_feature->enabled) {
7797                 dev_info(&ctrl_info->pci_dev->dev,
7798                         "%s enabled\n", firmware_feature->feature_name);
7799                 return;
7800         }
7801
7802         dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7803                 firmware_feature->feature_name);
7804 }
7805
7806 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7807         struct pqi_firmware_feature *firmware_feature)
7808 {
7809         switch (firmware_feature->feature_bit) {
7810         case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7811                 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7812                 break;
7813         case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7814                 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7815                 break;
7816         case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7817                 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7818                 break;
7819         case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7820                 ctrl_info->soft_reset_handshake_supported =
7821                         firmware_feature->enabled &&
7822                         pqi_read_soft_reset_status(ctrl_info);
7823                 break;
7824         case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7825                 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7826                 break;
7827         case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7828                 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7829                 break;
7830         case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7831                 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7832                 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7833                 break;
7834         case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7835                 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7836                 break;
7837         case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7838                 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7839                 break;
7840         }
7841
7842         pqi_firmware_feature_status(ctrl_info, firmware_feature);
7843 }
7844
7845 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7846         struct pqi_firmware_feature *firmware_feature)
7847 {
7848         if (firmware_feature->feature_status)
7849                 firmware_feature->feature_status(ctrl_info, firmware_feature);
7850 }
7851
7852 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7853
7854 static struct pqi_firmware_feature pqi_firmware_features[] = {
7855         {
7856                 .feature_name = "Online Firmware Activation",
7857                 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7858                 .feature_status = pqi_firmware_feature_status,
7859         },
7860         {
7861                 .feature_name = "Serial Management Protocol",
7862                 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7863                 .feature_status = pqi_firmware_feature_status,
7864         },
7865         {
7866                 .feature_name = "Maximum Known Feature",
7867                 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7868                 .feature_status = pqi_firmware_feature_status,
7869         },
7870         {
7871                 .feature_name = "RAID 0 Read Bypass",
7872                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7873                 .feature_status = pqi_firmware_feature_status,
7874         },
7875         {
7876                 .feature_name = "RAID 1 Read Bypass",
7877                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7878                 .feature_status = pqi_firmware_feature_status,
7879         },
7880         {
7881                 .feature_name = "RAID 5 Read Bypass",
7882                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7883                 .feature_status = pqi_firmware_feature_status,
7884         },
7885         {
7886                 .feature_name = "RAID 6 Read Bypass",
7887                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7888                 .feature_status = pqi_firmware_feature_status,
7889         },
7890         {
7891                 .feature_name = "RAID 0 Write Bypass",
7892                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7893                 .feature_status = pqi_firmware_feature_status,
7894         },
7895         {
7896                 .feature_name = "RAID 1 Write Bypass",
7897                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7898                 .feature_status = pqi_ctrl_update_feature_flags,
7899         },
7900         {
7901                 .feature_name = "RAID 5 Write Bypass",
7902                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7903                 .feature_status = pqi_ctrl_update_feature_flags,
7904         },
7905         {
7906                 .feature_name = "RAID 6 Write Bypass",
7907                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7908                 .feature_status = pqi_ctrl_update_feature_flags,
7909         },
7910         {
7911                 .feature_name = "New Soft Reset Handshake",
7912                 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
7913                 .feature_status = pqi_ctrl_update_feature_flags,
7914         },
7915         {
7916                 .feature_name = "RAID IU Timeout",
7917                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
7918                 .feature_status = pqi_ctrl_update_feature_flags,
7919         },
7920         {
7921                 .feature_name = "TMF IU Timeout",
7922                 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
7923                 .feature_status = pqi_ctrl_update_feature_flags,
7924         },
7925         {
7926                 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
7927                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
7928                 .feature_status = pqi_firmware_feature_status,
7929         },
7930         {
7931                 .feature_name = "Firmware Triage",
7932                 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
7933                 .feature_status = pqi_ctrl_update_feature_flags,
7934         },
7935         {
7936                 .feature_name = "RPL Extended Formats 4 and 5",
7937                 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
7938                 .feature_status = pqi_ctrl_update_feature_flags,
7939         },
7940         {
7941                 .feature_name = "Multi-LUN Target",
7942                 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
7943                 .feature_status = pqi_ctrl_update_feature_flags,
7944         },
7945 };
7946
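/*
 * Negotiate firmware features: mark each entry of pqi_firmware_features[]
 * that the controller advertises as supported, request all of those features,
 * trigger a configuration table update so firmware can act on the request,
 * and then record which features were actually enabled, invoking each
 * feature's status callback along the way.
 */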
7947 static void pqi_process_firmware_features(
7948         struct pqi_config_table_section_info *section_info)
7949 {
7950         int rc;
7951         struct pqi_ctrl_info *ctrl_info;
7952         struct pqi_config_table_firmware_features *firmware_features;
7953         void __iomem *firmware_features_iomem_addr;
7954         unsigned int i;
7955         unsigned int num_features_supported;
7956
7957         ctrl_info = section_info->ctrl_info;
7958         firmware_features = section_info->section;
7959         firmware_features_iomem_addr = section_info->section_iomem_addr;
7960
7961         for (i = 0, num_features_supported = 0;
7962                 i < ARRAY_SIZE(pqi_firmware_features); i++) {
7963                 if (pqi_is_firmware_feature_supported(firmware_features,
7964                         pqi_firmware_features[i].feature_bit)) {
7965                         pqi_firmware_features[i].supported = true;
7966                         num_features_supported++;
7967                 } else {
7968                         pqi_firmware_feature_update(ctrl_info,
7969                                 &pqi_firmware_features[i]);
7970                 }
7971         }
7972
7973         if (num_features_supported == 0)
7974                 return;
7975
7976         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7977                 if (!pqi_firmware_features[i].supported)
7978                         continue;
7979                 pqi_request_firmware_feature(firmware_features,
7980                         pqi_firmware_features[i].feature_bit);
7981         }
7982
7983         rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
7984                 firmware_features_iomem_addr);
7985         if (rc) {
7986                 dev_err(&ctrl_info->pci_dev->dev,
7987                         "failed to enable firmware features in PQI configuration table\n");
7988                 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7989                         if (!pqi_firmware_features[i].supported)
7990                                 continue;
7991                         pqi_firmware_feature_update(ctrl_info,
7992                                 &pqi_firmware_features[i]);
7993                 }
7994                 return;
7995         }
7996
7997         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
7998                 if (!pqi_firmware_features[i].supported)
7999                         continue;
8000                 if (pqi_is_firmware_feature_enabled(firmware_features,
8001                         firmware_features_iomem_addr,
8002                         pqi_firmware_features[i].feature_bit)) {
8003                         pqi_firmware_features[i].enabled = true;
8004                 }
8005                 pqi_firmware_feature_update(ctrl_info,
8006                         &pqi_firmware_features[i]);
8007         }
8008 }
8009
8010 static void pqi_init_firmware_features(void)
8011 {
8012         unsigned int i;
8013
8014         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8015                 pqi_firmware_features[i].supported = false;
8016                 pqi_firmware_features[i].enabled = false;
8017         }
8018 }
8019
8020 static void pqi_process_firmware_features_section(
8021         struct pqi_config_table_section_info *section_info)
8022 {
8023         mutex_lock(&pqi_firmware_features_mutex);
8024         pqi_init_firmware_features();
8025         pqi_process_firmware_features(section_info);
8026         mutex_unlock(&pqi_firmware_features_mutex);
8027 }
8028
8029 /*
8030  * Reset all controller settings that can be initialized during the processing
8031  * of the PQI Configuration Table.
8032  */
8033
8034 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8035 {
8036         ctrl_info->heartbeat_counter = NULL;
8037         ctrl_info->soft_reset_status = NULL;
8038         ctrl_info->soft_reset_handshake_supported = false;
8039         ctrl_info->enable_r1_writes = false;
8040         ctrl_info->enable_r5_writes = false;
8041         ctrl_info->enable_r6_writes = false;
8042         ctrl_info->raid_iu_timeout_supported = false;
8043         ctrl_info->tmf_iu_timeout_supported = false;
8044         ctrl_info->firmware_triage_supported = false;
8045         ctrl_info->rpl_extended_format_4_5_supported = false;
8046         ctrl_info->multi_lun_device_supported = false;
8047 }
8048
8049 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8050 {
8051         u32 table_length;
8052         u32 section_offset;
8053         bool firmware_feature_section_present;
8054         void __iomem *table_iomem_addr;
8055         struct pqi_config_table *config_table;
8056         struct pqi_config_table_section_header *section;
8057         struct pqi_config_table_section_info section_info;
8058         struct pqi_config_table_section_info feature_section_info = {0};
8059
8060         table_length = ctrl_info->config_table_length;
8061         if (table_length == 0)
8062                 return 0;
8063
8064         config_table = kmalloc(table_length, GFP_KERNEL);
8065         if (!config_table) {
8066                 dev_err(&ctrl_info->pci_dev->dev,
8067                         "failed to allocate memory for PQI configuration table\n");
8068                 return -ENOMEM;
8069         }
8070
8071         /*
8072          * Copy the config table contents from I/O memory space into the
8073          * temporary buffer.
8074          */
8075         table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8076         memcpy_fromio(config_table, table_iomem_addr, table_length);
8077
8078         firmware_feature_section_present = false;
8079         section_info.ctrl_info = ctrl_info;
8080         section_offset = get_unaligned_le32(&config_table->first_section_offset);
8081
8082         while (section_offset) {
8083                 section = (void *)config_table + section_offset;
8084
8085                 section_info.section = section;
8086                 section_info.section_offset = section_offset;
8087                 section_info.section_iomem_addr = table_iomem_addr + section_offset;
8088
8089                 switch (get_unaligned_le16(&section->section_id)) {
8090                 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8091                         firmware_feature_section_present = true;
8092                         feature_section_info = section_info;
8093                         break;
8094                 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8095                         if (pqi_disable_heartbeat)
8096                                 dev_warn(&ctrl_info->pci_dev->dev,
8097                                 "heartbeat disabled by module parameter\n");
8098                         else
8099                                 ctrl_info->heartbeat_counter =
8100                                         table_iomem_addr +
8101                                         section_offset +
8102                                         offsetof(struct pqi_config_table_heartbeat,
8103                                                 heartbeat_counter);
8104                         break;
8105                 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8106                         ctrl_info->soft_reset_status =
8107                                 table_iomem_addr +
8108                                 section_offset +
8109                                 offsetof(struct pqi_config_table_soft_reset,
8110                                         soft_reset_status);
8111                         break;
8112                 }
8113
8114                 section_offset = get_unaligned_le16(&section->next_section_offset);
8115         }
8116
8117         /*
8118          * We process the firmware feature section after all other sections
8119          * have been processed so that the feature bit callbacks can take
8120          * into account the settings configured by other sections.
8121          */
8122         if (firmware_feature_section_present)
8123                 pqi_process_firmware_features_section(&feature_section_info);
8124
8125         kfree(config_table);
8126
8127         return 0;
8128 }
8129
8130 /* Switches the controller from PQI mode back into SIS mode. */
8131
8132 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8133 {
8134         int rc;
8135
8136         pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8137         rc = pqi_reset(ctrl_info);
8138         if (rc)
8139                 return rc;
8140         rc = sis_reenable_sis_mode(ctrl_info);
8141         if (rc) {
8142                 dev_err(&ctrl_info->pci_dev->dev,
8143                         "re-enabling SIS mode failed with error %d\n", rc);
8144                 return rc;
8145         }
8146         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8147
8148         return 0;
8149 }
8150
8151 /*
8152  * If the controller isn't already in SIS mode, this function forces it into
8153  * SIS mode.
8154  */
8155
8156 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8157 {
8158         if (!sis_is_firmware_running(ctrl_info))
8159                 return -ENXIO;
8160
8161         if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8162                 return 0;
8163
8164         if (sis_is_kernel_up(ctrl_info)) {
8165                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8166                 return 0;
8167         }
8168
8169         return pqi_revert_to_sis_mode(ctrl_info);
8170 }
8171
8172 static void pqi_perform_lockup_action(void)
8173 {
8174         switch (pqi_lockup_action) {
8175         case PANIC:
8176                 panic("FATAL: Smart Family Controller lockup detected");
8177                 break;
8178         case REBOOT:
8179                 emergency_restart();
8180                 break;
8181         case NONE:
8182         default:
8183                 break;
8184         }
8185 }
8186
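/*
 * One-time controller bring-up: ensure the controller is in SIS mode, query
 * its properties and capabilities, transition it to PQI mode, create the
 * admin and operational queues, enable MSI-X interrupts and events, process
 * the PQI configuration table, register the SCSI host, and kick off the
 * initial device scan.
 */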
8187 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8188 {
8189         int rc;
8190         u32 product_id;
8191
8192         if (reset_devices) {
8193                 if (pqi_is_fw_triage_supported(ctrl_info)) {
8194                         rc = sis_wait_for_fw_triage_completion(ctrl_info);
8195                         if (rc)
8196                                 return rc;
8197                 }
8198                 sis_soft_reset(ctrl_info);
8199                 ssleep(PQI_POST_RESET_DELAY_SECS);
8200         } else {
8201                 rc = pqi_force_sis_mode(ctrl_info);
8202                 if (rc)
8203                         return rc;
8204         }
8205
8206         /*
8207          * Wait until the controller is ready to start accepting SIS
8208          * commands.
8209          */
8210         rc = sis_wait_for_ctrl_ready(ctrl_info);
8211         if (rc) {
8212                 if (reset_devices) {
8213                         dev_err(&ctrl_info->pci_dev->dev,
8214                                 "kdump init failed with error %d\n", rc);
8215                         pqi_lockup_action = REBOOT;
8216                         pqi_perform_lockup_action();
8217                 }
8218                 return rc;
8219         }
8220
8221         /*
8222          * Get the controller properties.  This allows us to determine
8223          * whether or not it supports PQI mode.
8224          */
8225         rc = sis_get_ctrl_properties(ctrl_info);
8226         if (rc) {
8227                 dev_err(&ctrl_info->pci_dev->dev,
8228                         "error obtaining controller properties\n");
8229                 return rc;
8230         }
8231
8232         rc = sis_get_pqi_capabilities(ctrl_info);
8233         if (rc) {
8234                 dev_err(&ctrl_info->pci_dev->dev,
8235                         "error obtaining controller capabilities\n");
8236                 return rc;
8237         }
8238
8239         product_id = sis_get_product_id(ctrl_info);
8240         ctrl_info->product_id = (u8)product_id;
8241         ctrl_info->product_revision = (u8)(product_id >> 8);
8242
8243         if (reset_devices) {
8244                 if (ctrl_info->max_outstanding_requests >
8245                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8246                                 ctrl_info->max_outstanding_requests =
8247                                         PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8248         } else {
8249                 if (ctrl_info->max_outstanding_requests >
8250                         PQI_MAX_OUTSTANDING_REQUESTS)
8251                                 ctrl_info->max_outstanding_requests =
8252                                         PQI_MAX_OUTSTANDING_REQUESTS;
8253         }
8254
8255         pqi_calculate_io_resources(ctrl_info);
8256
8257         rc = pqi_alloc_error_buffer(ctrl_info);
8258         if (rc) {
8259                 dev_err(&ctrl_info->pci_dev->dev,
8260                         "failed to allocate PQI error buffer\n");
8261                 return rc;
8262         }
8263
8264         /*
8265          * If the function we are about to call succeeds, the
8266          * controller will transition from legacy SIS mode
8267          * into PQI mode.
8268          */
8269         rc = sis_init_base_struct_addr(ctrl_info);
8270         if (rc) {
8271                 dev_err(&ctrl_info->pci_dev->dev,
8272                         "error initializing PQI mode\n");
8273                 return rc;
8274         }
8275
8276         /* Wait for the controller to complete the SIS -> PQI transition. */
8277         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8278         if (rc) {
8279                 dev_err(&ctrl_info->pci_dev->dev,
8280                         "transition to PQI mode failed\n");
8281                 return rc;
8282         }
8283
8284         /* From here on, we are running in PQI mode. */
8285         ctrl_info->pqi_mode_enabled = true;
8286         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8287
8288         rc = pqi_alloc_admin_queues(ctrl_info);
8289         if (rc) {
8290                 dev_err(&ctrl_info->pci_dev->dev,
8291                         "failed to allocate admin queues\n");
8292                 return rc;
8293         }
8294
8295         rc = pqi_create_admin_queues(ctrl_info);
8296         if (rc) {
8297                 dev_err(&ctrl_info->pci_dev->dev,
8298                         "error creating admin queues\n");
8299                 return rc;
8300         }
8301
8302         rc = pqi_report_device_capability(ctrl_info);
8303         if (rc) {
8304                 dev_err(&ctrl_info->pci_dev->dev,
8305                         "obtaining device capability failed\n");
8306                 return rc;
8307         }
8308
8309         rc = pqi_validate_device_capability(ctrl_info);
8310         if (rc)
8311                 return rc;
8312
8313         pqi_calculate_queue_resources(ctrl_info);
8314
8315         rc = pqi_enable_msix_interrupts(ctrl_info);
8316         if (rc)
8317                 return rc;
8318
8319         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8320                 ctrl_info->max_msix_vectors =
8321                         ctrl_info->num_msix_vectors_enabled;
8322                 pqi_calculate_queue_resources(ctrl_info);
8323         }
8324
8325         rc = pqi_alloc_io_resources(ctrl_info);
8326         if (rc)
8327                 return rc;
8328
8329         rc = pqi_alloc_operational_queues(ctrl_info);
8330         if (rc) {
8331                 dev_err(&ctrl_info->pci_dev->dev,
8332                         "failed to allocate operational queues\n");
8333                 return rc;
8334         }
8335
8336         pqi_init_operational_queues(ctrl_info);
8337
8338         rc = pqi_create_queues(ctrl_info);
8339         if (rc)
8340                 return rc;
8341
8342         rc = pqi_request_irqs(ctrl_info);
8343         if (rc)
8344                 return rc;
8345
8346         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8347
8348         ctrl_info->controller_online = true;
8349
8350         rc = pqi_process_config_table(ctrl_info);
8351         if (rc)
8352                 return rc;
8353
8354         pqi_start_heartbeat_timer(ctrl_info);
8355
8356         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8357                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8358                 if (rc) { /* Supported features not returned correctly. */
8359                         dev_err(&ctrl_info->pci_dev->dev,
8360                                 "error obtaining advanced RAID bypass configuration\n");
8361                         return rc;
8362                 }
8363                 ctrl_info->ciss_report_log_flags |=
8364                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8365         }
8366
8367         rc = pqi_enable_events(ctrl_info);
8368         if (rc) {
8369                 dev_err(&ctrl_info->pci_dev->dev,
8370                         "error enabling events\n");
8371                 return rc;
8372         }
8373
8374         /* Register with the SCSI subsystem. */
8375         rc = pqi_register_scsi(ctrl_info);
8376         if (rc)
8377                 return rc;
8378
8379         rc = pqi_get_ctrl_product_details(ctrl_info);
8380         if (rc) {
8381                 dev_err(&ctrl_info->pci_dev->dev,
8382                         "error obtaining product details\n");
8383                 return rc;
8384         }
8385
8386         rc = pqi_get_ctrl_serial_number(ctrl_info);
8387         if (rc) {
8388                 dev_err(&ctrl_info->pci_dev->dev,
8389                         "error obtaining ctrl serial number\n");
8390                 return rc;
8391         }
8392
8393         rc = pqi_set_diag_rescan(ctrl_info);
8394         if (rc) {
8395                 dev_err(&ctrl_info->pci_dev->dev,
8396                         "error enabling multi-lun rescan\n");
8397                 return rc;
8398         }
8399
8400         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8401         if (rc) {
8402                 dev_err(&ctrl_info->pci_dev->dev,
8403                         "error updating host wellness\n");
8404                 return rc;
8405         }
8406
8407         pqi_schedule_update_time_worker(ctrl_info);
8408
8409         pqi_scan_scsi_devices(ctrl_info);
8410
8411         return 0;
8412 }
8413
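/*
 * Reset the admin, operational, and event queue producer/consumer indices to
 * their power-on values so the previously allocated queue memory can be
 * reused when the controller is re-initialized.
 */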
8414 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8415 {
8416         unsigned int i;
8417         struct pqi_admin_queues *admin_queues;
8418         struct pqi_event_queue *event_queue;
8419
8420         admin_queues = &ctrl_info->admin_queues;
8421         admin_queues->iq_pi_copy = 0;
8422         admin_queues->oq_ci_copy = 0;
8423         writel(0, admin_queues->oq_pi);
8424
8425         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8426                 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8427                 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8428                 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8429
8430                 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8431                 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8432                 writel(0, ctrl_info->queue_groups[i].oq_pi);
8433         }
8434
8435         event_queue = &ctrl_info->event_queue;
8436         writel(0, event_queue->oq_pi);
8437         event_queue->oq_ci_copy = 0;
8438 }
8439
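/*
 * Lightweight re-initialization path used on resume and after an Online
 * Firmware Activation restart: force the controller back into SIS mode,
 * transition to PQI mode again, and reuse the existing queue memory instead
 * of re-allocating it.
 */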
8440 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8441 {
8442         int rc;
8443
8444         rc = pqi_force_sis_mode(ctrl_info);
8445         if (rc)
8446                 return rc;
8447
8448         /*
8449          * Wait until the controller is ready to start accepting SIS
8450          * commands.
8451          */
8452         rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8453         if (rc)
8454                 return rc;
8455
8456         /*
8457          * Get the controller properties.  This allows us to determine
8458          * whether or not it supports PQI mode.
8459          */
8460         rc = sis_get_ctrl_properties(ctrl_info);
8461         if (rc) {
8462                 dev_err(&ctrl_info->pci_dev->dev,
8463                         "error obtaining controller properties\n");
8464                 return rc;
8465         }
8466
8467         rc = sis_get_pqi_capabilities(ctrl_info);
8468         if (rc) {
8469                 dev_err(&ctrl_info->pci_dev->dev,
8470                         "error obtaining controller capabilities\n");
8471                 return rc;
8472         }
8473
8474         /*
8475          * If the function we are about to call succeeds, the
8476          * controller will transition from legacy SIS mode
8477          * into PQI mode.
8478          */
8479         rc = sis_init_base_struct_addr(ctrl_info);
8480         if (rc) {
8481                 dev_err(&ctrl_info->pci_dev->dev,
8482                         "error initializing PQI mode\n");
8483                 return rc;
8484         }
8485
8486         /* Wait for the controller to complete the SIS -> PQI transition. */
8487         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8488         if (rc) {
8489                 dev_err(&ctrl_info->pci_dev->dev,
8490                         "transition to PQI mode failed\n");
8491                 return rc;
8492         }
8493
8494         /* From here on, we are running in PQI mode. */
8495         ctrl_info->pqi_mode_enabled = true;
8496         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8497
8498         pqi_reinit_queues(ctrl_info);
8499
8500         rc = pqi_create_admin_queues(ctrl_info);
8501         if (rc) {
8502                 dev_err(&ctrl_info->pci_dev->dev,
8503                         "error creating admin queues\n");
8504                 return rc;
8505         }
8506
8507         rc = pqi_create_queues(ctrl_info);
8508         if (rc)
8509                 return rc;
8510
8511         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8512
8513         ctrl_info->controller_online = true;
8514         pqi_ctrl_unblock_requests(ctrl_info);
8515
8516         pqi_ctrl_reset_config(ctrl_info);
8517
8518         rc = pqi_process_config_table(ctrl_info);
8519         if (rc)
8520                 return rc;
8521
8522         pqi_start_heartbeat_timer(ctrl_info);
8523
8524         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8525                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8526                 if (rc) {
8527                         dev_err(&ctrl_info->pci_dev->dev,
8528                                 "error obtaining advanced RAID bypass configuration\n");
8529                         return rc;
8530                 }
8531                 ctrl_info->ciss_report_log_flags |=
8532                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8533         }
8534
8535         rc = pqi_enable_events(ctrl_info);
8536         if (rc) {
8537                 dev_err(&ctrl_info->pci_dev->dev,
8538                         "error enabling events\n");
8539                 return rc;
8540         }
8541
8542         rc = pqi_get_ctrl_product_details(ctrl_info);
8543         if (rc) {
8544                 dev_err(&ctrl_info->pci_dev->dev,
8545                         "error obtaining product details\n");
8546                 return rc;
8547         }
8548
8549         rc = pqi_set_diag_rescan(ctrl_info);
8550         if (rc) {
8551                 dev_err(&ctrl_info->pci_dev->dev,
8552                         "error enabling multi-lun rescan\n");
8553                 return rc;
8554         }
8555
8556         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8557         if (rc) {
8558                 dev_err(&ctrl_info->pci_dev->dev,
8559                         "error updating host wellness\n");
8560                 return rc;
8561         }
8562
8563         if (pqi_ofa_in_progress(ctrl_info))
8564                 pqi_ctrl_unblock_scan(ctrl_info);
8565
8566         pqi_scan_scsi_devices(ctrl_info);
8567
8568         return 0;
8569 }
8570
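/* Program the PCIe completion timeout range in the Device Control 2 register. */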
8571 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8572 {
8573         int rc;
8574
8575         rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8576                 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8577
8578         return pcibios_err_to_errno(rc);
8579 }
8580
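/*
 * Set up the PCI side of the controller: enable the device, set the DMA mask,
 * map BAR 0, raise the PCIe completion timeout, and enable bus mastering.
 */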
8581 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8582 {
8583         int rc;
8584         u64 mask;
8585
8586         rc = pci_enable_device(ctrl_info->pci_dev);
8587         if (rc) {
8588                 dev_err(&ctrl_info->pci_dev->dev,
8589                         "failed to enable PCI device\n");
8590                 return rc;
8591         }
8592
8593         if (sizeof(dma_addr_t) > 4)
8594                 mask = DMA_BIT_MASK(64);
8595         else
8596                 mask = DMA_BIT_MASK(32);
8597
8598         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8599         if (rc) {
8600                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8601                 goto disable_device;
8602         }
8603
8604         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8605         if (rc) {
8606                 dev_err(&ctrl_info->pci_dev->dev,
8607                         "failed to obtain PCI resources\n");
8608                 goto disable_device;
8609         }
8610
8611         ctrl_info->iomem_base = ioremap(pci_resource_start(
8612                 ctrl_info->pci_dev, 0),
8613                 pci_resource_len(ctrl_info->pci_dev, 0));
8614         if (!ctrl_info->iomem_base) {
8615                 dev_err(&ctrl_info->pci_dev->dev,
8616                         "failed to map memory for controller registers\n");
8617                 rc = -ENOMEM;
8618                 goto release_regions;
8619         }
8620
8621 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS               0x6
8622
8623         /* Increase the PCIe completion timeout. */
8624         rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8625                 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8626         if (rc) {
8627                 dev_err(&ctrl_info->pci_dev->dev,
8628                         "failed to set PCIe completion timeout\n");
8629                 goto release_regions;
8630         }
8631
8632         /* Enable bus mastering. */
8633         pci_set_master(ctrl_info->pci_dev);
8634
8635         ctrl_info->registers = ctrl_info->iomem_base;
8636         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8637
8638         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8639
8640         return 0;
8641
8642 release_regions:
8643         pci_release_regions(ctrl_info->pci_dev);
8644 disable_device:
8645         pci_disable_device(ctrl_info->pci_dev);
8646
8647         return rc;
8648 }
8649
8650 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8651 {
8652         iounmap(ctrl_info->iomem_base);
8653         pci_release_regions(ctrl_info->pci_dev);
8654         if (pci_is_enabled(ctrl_info->pci_dev))
8655                 pci_disable_device(ctrl_info->pci_dev);
8656         pci_set_drvdata(ctrl_info->pci_dev, NULL);
8657 }
8658
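/*
 * Allocate the per-controller context on the requested NUMA node and
 * initialize its locks, work items, timers, and default limits.
 */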
8659 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8660 {
8661         struct pqi_ctrl_info *ctrl_info;
8662
8663         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8664                         GFP_KERNEL, numa_node);
8665         if (!ctrl_info)
8666                 return NULL;
8667
8668         mutex_init(&ctrl_info->scan_mutex);
8669         mutex_init(&ctrl_info->lun_reset_mutex);
8670         mutex_init(&ctrl_info->ofa_mutex);
8671
8672         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8673         spin_lock_init(&ctrl_info->scsi_device_list_lock);
8674
8675         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8676         atomic_set(&ctrl_info->num_interrupts, 0);
8677
8678         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8679         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8680
8681         timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8682         INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8683
8684         INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8685         INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8686
8687         sema_init(&ctrl_info->sync_request_sem,
8688                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8689         init_waitqueue_head(&ctrl_info->block_requests_wait);
8690
8691         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8692         ctrl_info->irq_mode = IRQ_MODE_NONE;
8693         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8694
8695         ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8696         ctrl_info->max_transfer_encrypted_sas_sata =
8697                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8698         ctrl_info->max_transfer_encrypted_nvme =
8699                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8700         ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8701         ctrl_info->max_write_raid_1_10_2drive = ~0;
8702         ctrl_info->max_write_raid_1_10_3drive = ~0;
8703         ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8704
8705         return ctrl_info;
8706 }
8707
8708 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8709 {
8710         kfree(ctrl_info);
8711 }
8712
8713 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8714 {
8715         pqi_free_irqs(ctrl_info);
8716         pqi_disable_msix_interrupts(ctrl_info);
8717 }
8718
8719 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8720 {
8721         pqi_free_interrupts(ctrl_info);
8722         if (ctrl_info->queue_memory_base)
8723                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8724                         ctrl_info->queue_memory_length,
8725                         ctrl_info->queue_memory_base,
8726                         ctrl_info->queue_memory_base_dma_handle);
8727         if (ctrl_info->admin_queue_memory_base)
8728                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8729                         ctrl_info->admin_queue_memory_length,
8730                         ctrl_info->admin_queue_memory_base,
8731                         ctrl_info->admin_queue_memory_base_dma_handle);
8732         pqi_free_all_io_requests(ctrl_info);
8733         if (ctrl_info->error_buffer)
8734                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8735                         ctrl_info->error_buffer_length,
8736                         ctrl_info->error_buffer,
8737                         ctrl_info->error_buffer_dma_handle);
8738         if (ctrl_info->iomem_base)
8739                 pqi_cleanup_pci_init(ctrl_info);
8740         pqi_free_ctrl_info(ctrl_info);
8741 }
8742
8743 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8744 {
8745         ctrl_info->controller_online = false;
8746         pqi_stop_heartbeat_timer(ctrl_info);
8747         pqi_ctrl_block_requests(ctrl_info);
8748         pqi_cancel_rescan_worker(ctrl_info);
8749         pqi_cancel_update_time_worker(ctrl_info);
8750         if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8751                 pqi_fail_all_outstanding_requests(ctrl_info);
8752                 ctrl_info->pqi_mode_enabled = false;
8753         }
8754         pqi_unregister_scsi(ctrl_info);
8755         if (ctrl_info->pqi_mode_enabled)
8756                 pqi_revert_to_sis_mode(ctrl_info);
8757         pqi_free_ctrl_resources(ctrl_info);
8758 }
8759
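/* Quiesce all driver activity before an Online Firmware Activation. */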
8760 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8761 {
8762         pqi_ctrl_block_scan(ctrl_info);
8763         pqi_scsi_block_requests(ctrl_info);
8764         pqi_ctrl_block_device_reset(ctrl_info);
8765         pqi_ctrl_block_requests(ctrl_info);
8766         pqi_ctrl_wait_until_quiesced(ctrl_info);
8767         pqi_stop_heartbeat_timer(ctrl_info);
8768 }
8769
8770 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8771 {
8772         pqi_start_heartbeat_timer(ctrl_info);
8773         pqi_ctrl_unblock_requests(ctrl_info);
8774         pqi_ctrl_unblock_device_reset(ctrl_info);
8775         pqi_scsi_unblock_requests(ctrl_info);
8776         pqi_ctrl_unblock_scan(ctrl_info);
8777 }
8778
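/*
 * Allocate the OFA host buffer as coherent DMA chunks of chunk_size bytes
 * (at most PQI_OFA_MAX_SG_DESCRIPTORS of them) and describe each chunk in the
 * SG descriptor list of the OFA memory structure.
 */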
8779 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
8780 {
8781         int i;
8782         u32 sg_count;
8783         struct device *dev;
8784         struct pqi_ofa_memory *ofap;
8785         struct pqi_sg_descriptor *mem_descriptor;
8786         dma_addr_t dma_handle;
8787
8788         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8789
8790         sg_count = DIV_ROUND_UP(total_size, chunk_size);
8791         if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS)
8792                 goto out;
8793
8794         ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8795         if (!ctrl_info->pqi_ofa_chunk_virt_addr)
8796                 goto out;
8797
8798         dev = &ctrl_info->pci_dev->dev;
8799
8800         for (i = 0; i < sg_count; i++) {
8801                 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
8802                         dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8803                 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
8804                         goto out_free_chunks;
8805                 mem_descriptor = &ofap->sg_descriptor[i];
8806                 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8807                 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8808         }
8809
8810         put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8811         put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
8812         put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated);
8813
8814         return 0;
8815
8816 out_free_chunks:
8817         while (--i >= 0) {
8818                 mem_descriptor = &ofap->sg_descriptor[i];
8819                 dma_free_coherent(dev, chunk_size,
8820                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8821                         get_unaligned_le64(&mem_descriptor->address));
8822         }
8823         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8824
8825 out:
8826         return -ENOMEM;
8827 }
8828
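/*
 * Try progressively smaller chunk sizes, halving on each failure, until the
 * requested OFA buffer can be allocated or the chunk size falls below the
 * minimum that still fits within the SG descriptor limit.
 */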
8829 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
8830 {
8831         u32 total_size;
8832         u32 chunk_size;
8833         u32 min_chunk_size;
8834
8835         if (ctrl_info->ofa_bytes_requested == 0)
8836                 return 0;
8837
8838         total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested);
8839         min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS);
8840         min_chunk_size = PAGE_ALIGN(min_chunk_size);
8841
8842         for (chunk_size = total_size; chunk_size >= min_chunk_size;) {
8843                 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0)
8844                         return 0;
8845                 chunk_size /= 2;
8846                 chunk_size = PAGE_ALIGN(chunk_size);
8847         }
8848
8849         return -ENOMEM;
8850 }
8851
8852 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
8853 {
8854         struct device *dev;
8855         struct pqi_ofa_memory *ofap;
8856
8857         dev = &ctrl_info->pci_dev->dev;
8858
8859         ofap = dma_alloc_coherent(dev, sizeof(*ofap),
8860                 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL);
8861         if (!ofap)
8862                 return;
8863
8864         ctrl_info->pqi_ofa_mem_virt_addr = ofap;
8865
8866         if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
8867                 dev_err(dev,
8868                         "failed to allocate host buffer for Online Firmware Activation\n");
8869                 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle);
8870                 ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8871                 return;
8872         }
8873
8874         put_unaligned_le16(PQI_OFA_VERSION, &ofap->version);
8875         memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature));
8876 }
8877
8878 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
8879 {
8880         unsigned int i;
8881         struct device *dev;
8882         struct pqi_ofa_memory *ofap;
8883         struct pqi_sg_descriptor *mem_descriptor;
8884         unsigned int num_memory_descriptors;
8885
8886         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8887         if (!ofap)
8888                 return;
8889
8890         dev = &ctrl_info->pci_dev->dev;
8891
8892         if (get_unaligned_le32(&ofap->bytes_allocated) == 0)
8893                 goto out;
8894
8895         mem_descriptor = ofap->sg_descriptor;
8896         num_memory_descriptors =
8897                 get_unaligned_le16(&ofap->num_memory_descriptors);
8898
8899         for (i = 0; i < num_memory_descriptors; i++) {
8900                 dma_free_coherent(dev,
8901                         get_unaligned_le32(&mem_descriptor[i].length),
8902                         ctrl_info->pqi_ofa_chunk_virt_addr[i],
8903                         get_unaligned_le64(&mem_descriptor[i].address));
8904         }
8905         kfree(ctrl_info->pqi_ofa_chunk_virt_addr);
8906
8907 out:
8908         dma_free_coherent(dev, sizeof(*ofap), ofap,
8909                 ctrl_info->pqi_ofa_mem_dma_handle);
8910         ctrl_info->pqi_ofa_mem_virt_addr = NULL;
8911 }
8912
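/*
 * Send a vendor-general host memory update request describing the OFA host
 * buffer to the controller; if no buffer was allocated, the address and
 * length fields are left zero.
 */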
8913 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
8914 {
8915         u32 buffer_length;
8916         struct pqi_vendor_general_request request;
8917         struct pqi_ofa_memory *ofap;
8918
8919         memset(&request, 0, sizeof(request));
8920
8921         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
8922         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
8923                 &request.header.iu_length);
8924         put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
8925                 &request.function_code);
8926
8927         ofap = ctrl_info->pqi_ofa_mem_virt_addr;
8928
8929         if (ofap) {
8930                 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) +
8931                         get_unaligned_le16(&ofap->num_memory_descriptors) *
8932                         sizeof(struct pqi_sg_descriptor);
8933
8934                 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
8935                         &request.data.ofa_memory_allocation.buffer_address);
8936                 put_unaligned_le32(buffer_length,
8937                         &request.data.ofa_memory_allocation.buffer_length);
8938         }
8939
8940         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
8941 }
8942
8943 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8944 {
8945         ssleep(delay_secs);
8946
8947         return pqi_ctrl_init_resume(ctrl_info);
8948 }
8949
8950 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
8951         .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
8952         .status = SAM_STAT_CHECK_CONDITION,
8953 };
8954
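/*
 * Complete every outstanding request with an error once the controller is
 * offline: SCSI commands still bound to an online device are failed with
 * DID_NO_CONNECT, driver-internal requests with -ENXIO and a synthetic
 * CHECK CONDITION.
 */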
8955 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
8956 {
8957         unsigned int i;
8958         struct pqi_io_request *io_request;
8959         struct scsi_cmnd *scmd;
8960         struct scsi_device *sdev;
8961
8962         for (i = 0; i < ctrl_info->max_io_slots; i++) {
8963                 io_request = &ctrl_info->io_request_pool[i];
8964                 if (atomic_read(&io_request->refcount) == 0)
8965                         continue;
8966
8967                 scmd = io_request->scmd;
8968                 if (scmd) {
8969                         sdev = scmd->device;
8970                         if (!sdev || !scsi_device_online(sdev)) {
8971                                 pqi_free_io_request(io_request);
8972                                 continue;
8973                         } else {
8974                                 set_host_byte(scmd, DID_NO_CONNECT);
8975                         }
8976                 } else {
8977                         io_request->status = -ENXIO;
8978                         io_request->error_info =
8979                                 &pqi_ctrl_offline_raid_error_info;
8980                 }
8981
8982                 io_request->io_complete_callback(io_request,
8983                         io_request->context);
8984         }
8985 }
8986
8987 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
8988 {
8989         pqi_perform_lockup_action();
8990         pqi_stop_heartbeat_timer(ctrl_info);
8991         pqi_free_interrupts(ctrl_info);
8992         pqi_cancel_rescan_worker(ctrl_info);
8993         pqi_cancel_update_time_worker(ctrl_info);
8994         pqi_ctrl_wait_until_quiesced(ctrl_info);
8995         pqi_fail_all_outstanding_requests(ctrl_info);
8996         pqi_ctrl_unblock_requests(ctrl_info);
8997 }
8998
8999 static void pqi_ctrl_offline_worker(struct work_struct *work)
9000 {
9001         struct pqi_ctrl_info *ctrl_info;
9002
9003         ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
9004         pqi_take_ctrl_offline_deferred(ctrl_info);
9005 }
9006
9007 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9008 {
9009         char *string;
9010
9011         switch (ctrl_shutdown_reason) {
9012         case PQI_IQ_NOT_DRAINED_TIMEOUT:
9013                 string = "inbound queue not drained timeout";
9014                 break;
9015         case PQI_LUN_RESET_TIMEOUT:
9016                 string = "LUN reset timeout";
9017                 break;
9018         case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
9019                 string = "I/O pending timeout after LUN reset";
9020                 break;
9021         case PQI_NO_HEARTBEAT:
9022                 string = "no controller heartbeat detected";
9023                 break;
9024         case PQI_FIRMWARE_KERNEL_NOT_UP:
9025                 string = "firmware kernel not ready";
9026                 break;
9027         case PQI_OFA_RESPONSE_TIMEOUT:
9028                 string = "OFA response timeout";
9029                 break;
9030         case PQI_INVALID_REQ_ID:
9031                 string = "invalid request ID";
9032                 break;
9033         case PQI_UNMATCHED_REQ_ID:
9034                 string = "unmatched request ID";
9035                 break;
9036         case PQI_IO_PI_OUT_OF_RANGE:
9037                 string = "I/O queue producer index out of range";
9038                 break;
9039         case PQI_EVENT_PI_OUT_OF_RANGE:
9040                 string = "event queue producer index out of range";
9041                 break;
9042         case PQI_UNEXPECTED_IU_TYPE:
9043                 string = "unexpected IU type";
9044                 break;
9045         default:
9046                 string = "unknown reason";
9047                 break;
9048         }
9049
9050         return string;
9051 }
9052
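/*
 * Mark the controller offline, optionally shut it down through SIS with the
 * given reason code, and defer the remaining teardown to the offline worker.
 */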
9053 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
9054         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9055 {
9056         if (!ctrl_info->controller_online)
9057                 return;
9058
9059         ctrl_info->controller_online = false;
9060         ctrl_info->pqi_mode_enabled = false;
9061         pqi_ctrl_block_requests(ctrl_info);
9062         if (!pqi_disable_ctrl_shutdown)
9063                 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
9064         pci_disable_device(ctrl_info->pci_dev);
9065         dev_err(&ctrl_info->pci_dev->dev,
9066                 "controller offline: reason code 0x%x (%s)\n",
9067                 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
9068         schedule_work(&ctrl_info->ctrl_offline_work);
9069 }
9070
9071 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
9072         const struct pci_device_id *id)
9073 {
9074         char *ctrl_description;
9075
9076         if (id->driver_data)
9077                 ctrl_description = (char *)id->driver_data;
9078         else
9079                 ctrl_description = "Microchip Smart Family Controller";
9080
9081         dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
9082 }
9083
9084 static int pqi_pci_probe(struct pci_dev *pci_dev,
9085         const struct pci_device_id *id)
9086 {
9087         int rc;
9088         int node;
9089         struct pqi_ctrl_info *ctrl_info;
9090
9091         pqi_print_ctrl_info(pci_dev, id);
9092
9093         if (pqi_disable_device_id_wildcards &&
9094                 id->subvendor == PCI_ANY_ID &&
9095                 id->subdevice == PCI_ANY_ID) {
9096                 dev_warn(&pci_dev->dev,
9097                         "controller not probed because device ID wildcards are disabled\n");
9098                 return -ENODEV;
9099         }
9100
9101         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
9102                 dev_warn(&pci_dev->dev,
9103                         "controller device ID matched using wildcards\n");
9104
9105         node = dev_to_node(&pci_dev->dev);
9106         if (node == NUMA_NO_NODE) {
9107                 node = cpu_to_node(0);
9108                 if (node == NUMA_NO_NODE)
9109                         node = 0;
9110                 set_dev_node(&pci_dev->dev, node);
9111         }
9112
9113         ctrl_info = pqi_alloc_ctrl_info(node);
9114         if (!ctrl_info) {
9115                 dev_err(&pci_dev->dev,
9116                         "failed to allocate controller info block\n");
9117                 return -ENOMEM;
9118         }
9119         ctrl_info->numa_node = node;
9120
9121         ctrl_info->pci_dev = pci_dev;
9122
9123         rc = pqi_pci_init(ctrl_info);
9124         if (rc)
9125                 goto error;
9126
9127         rc = pqi_ctrl_init(ctrl_info);
9128         if (rc)
9129                 goto error;
9130
9131         return 0;
9132
9133 error:
9134         pqi_remove_ctrl(ctrl_info);
9135
9136         return rc;
9137 }
9138
9139 static void pqi_pci_remove(struct pci_dev *pci_dev)
9140 {
9141         struct pqi_ctrl_info *ctrl_info;
9142         u16 vendor_id;
9143         int rc;
9144
9145         ctrl_info = pci_get_drvdata(pci_dev);
9146         if (!ctrl_info)
9147                 return;
9148
9149         pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9150         if (vendor_id == 0xffff)
9151                 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9152         else
9153                 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9154
9155         if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9156                 rc = pqi_flush_cache(ctrl_info, RESTART);
9157                 if (rc)
9158                         dev_err(&pci_dev->dev,
9159                                 "unable to flush controller cache during remove\n");
9160         }
9161
9162         pqi_remove_ctrl(ctrl_info);
9163 }
9164
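/*
 * Warn about any request still outstanding at shutdown time, distinguishing
 * SCSI commands issued by the midlayer from driver-internal requests.
 */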
9165 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9166 {
9167         unsigned int i;
9168         struct pqi_io_request *io_request;
9169         struct scsi_cmnd *scmd;
9170
9171         for (i = 0; i < ctrl_info->max_io_slots; i++) {
9172                 io_request = &ctrl_info->io_request_pool[i];
9173                 if (atomic_read(&io_request->refcount) == 0)
9174                         continue;
9175                 scmd = io_request->scmd;
9176                 WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
9177                 WARN_ON(scmd == NULL); /* non-I/O or driver-initiated request */
9178         }
9179 }
9180
9181 static void pqi_shutdown(struct pci_dev *pci_dev)
9182 {
9183         int rc;
9184         struct pqi_ctrl_info *ctrl_info;
9185         enum bmic_flush_cache_shutdown_event shutdown_event;
9186
9187         ctrl_info = pci_get_drvdata(pci_dev);
9188         if (!ctrl_info) {
9189                 dev_err(&pci_dev->dev,
9190                         "cache could not be flushed\n");
9191                 return;
9192         }
9193
9194         pqi_wait_until_ofa_finished(ctrl_info);
9195
9196         pqi_scsi_block_requests(ctrl_info);
9197         pqi_ctrl_block_device_reset(ctrl_info);
9198         pqi_ctrl_block_requests(ctrl_info);
9199         pqi_ctrl_wait_until_quiesced(ctrl_info);
9200
9201         if (system_state == SYSTEM_RESTART)
9202                 shutdown_event = RESTART;
9203         else
9204                 shutdown_event = SHUTDOWN;
9205
9206         /*
9207          * Write all data in the controller's battery-backed cache to
9208          * storage.
9209          */
9210         rc = pqi_flush_cache(ctrl_info, shutdown_event);
9211         if (rc)
9212                 dev_err(&pci_dev->dev,
9213                         "unable to flush controller cache during shutdown\n");
9214
9215         pqi_crash_if_pending_command(ctrl_info);
9216         pqi_reset(ctrl_info);
9217 }
9218
9219 static void pqi_process_lockup_action_param(void)
9220 {
9221         unsigned int i;
9222
9223         if (!pqi_lockup_action_param)
9224                 return;
9225
9226         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9227                 if (strcmp(pqi_lockup_action_param,
9228                         pqi_lockup_actions[i].name) == 0) {
9229                         pqi_lockup_action = pqi_lockup_actions[i].action;
9230                         return;
9231                 }
9232         }
9233
9234         pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9235                 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9236 }
9237
9238 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS           30
9239 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS           (30 * 60)
9240
9241 static void pqi_process_ctrl_ready_timeout_param(void)
9242 {
9243         if (pqi_ctrl_ready_timeout_secs == 0)
9244                 return;
9245
9246         if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9247                 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9248                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9249                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9250         } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9251                 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9252                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9253                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9254         }
9255
9256         sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9257 }
9258
9259 static void pqi_process_module_params(void)
9260 {
9261         pqi_process_lockup_action_param();
9262         pqi_process_ctrl_ready_timeout_param();
9263 }
9264
9265 #if defined(CONFIG_PM)
9266
9267 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9268 {
9269         if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9270                 return RESTART;
9271
9272         return SUSPEND;
9273 }
9274
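/*
 * Common quiesce path for suspend and freeze: wait for OFA to finish, block
 * new activity, flush the controller cache on suspend only, then release
 * interrupts and clear the online/PQI-mode flags.
 */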
9275 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9276 {
9277         struct pci_dev *pci_dev;
9278         struct pqi_ctrl_info *ctrl_info;
9279
9280         pci_dev = to_pci_dev(dev);
9281         ctrl_info = pci_get_drvdata(pci_dev);
9282
9283         pqi_wait_until_ofa_finished(ctrl_info);
9284
9285         pqi_ctrl_block_scan(ctrl_info);
9286         pqi_scsi_block_requests(ctrl_info);
9287         pqi_ctrl_block_device_reset(ctrl_info);
9288         pqi_ctrl_block_requests(ctrl_info);
9289         pqi_ctrl_wait_until_quiesced(ctrl_info);
9290
9291         if (suspend) {
9292                 enum bmic_flush_cache_shutdown_event shutdown_event;
9293
9294                 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9295                 pqi_flush_cache(ctrl_info, shutdown_event);
9296         }
9297
9298         pqi_stop_heartbeat_timer(ctrl_info);
9299         pqi_crash_if_pending_command(ctrl_info);
9300         pqi_free_irqs(ctrl_info);
9301
9302         ctrl_info->controller_online = false;
9303         ctrl_info->pqi_mode_enabled = false;
9304
9305         return 0;
9306 }
9307
9308 static __maybe_unused int pqi_suspend(struct device *dev)
9309 {
9310         return pqi_suspend_or_freeze(dev, true);
9311 }
9312
9313 static int pqi_resume_or_restore(struct device *dev)
9314 {
9315         int rc;
9316         struct pci_dev *pci_dev;
9317         struct pqi_ctrl_info *ctrl_info;
9318
9319         pci_dev = to_pci_dev(dev);
9320         ctrl_info = pci_get_drvdata(pci_dev);
9321
9322         rc = pqi_request_irqs(ctrl_info);
9323         if (rc)
9324                 return rc;
9325
9326         pqi_ctrl_unblock_device_reset(ctrl_info);
9327         pqi_ctrl_unblock_requests(ctrl_info);
9328         pqi_scsi_unblock_requests(ctrl_info);
9329         pqi_ctrl_unblock_scan(ctrl_info);
9330
9331         ssleep(PQI_POST_RESET_DELAY_SECS);
9332
9333         return pqi_ctrl_init_resume(ctrl_info);
9334 }
9335
9336 static int pqi_freeze(struct device *dev)
9337 {
9338         return pqi_suspend_or_freeze(dev, false);
9339 }
9340
9341 static int pqi_thaw(struct device *dev)
9342 {
9343         int rc;
9344         struct pci_dev *pci_dev;
9345         struct pqi_ctrl_info *ctrl_info;
9346
9347         pci_dev = to_pci_dev(dev);
9348         ctrl_info = pci_get_drvdata(pci_dev);
9349
9350         rc = pqi_request_irqs(ctrl_info);
9351         if (rc)
9352                 return rc;
9353
9354         ctrl_info->controller_online = true;
9355         ctrl_info->pqi_mode_enabled = true;
9356
9357         pqi_ctrl_unblock_device_reset(ctrl_info);
9358         pqi_ctrl_unblock_requests(ctrl_info);
9359         pqi_scsi_unblock_requests(ctrl_info);
9360         pqi_ctrl_unblock_scan(ctrl_info);
9361
9362         return 0;
9363 }
9364
9365 static int pqi_poweroff(struct device *dev)
9366 {
9367         struct pci_dev *pci_dev;
9368         struct pqi_ctrl_info *ctrl_info;
9369         enum bmic_flush_cache_shutdown_event shutdown_event;
9370
9371         pci_dev = to_pci_dev(dev);
9372         ctrl_info = pci_get_drvdata(pci_dev);
9373
9374         shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9375         pqi_flush_cache(ctrl_info, shutdown_event);
9376
9377         return 0;
9378 }
9379
9380 static const struct dev_pm_ops pqi_pm_ops = {
9381         .suspend = pqi_suspend,
9382         .resume = pqi_resume_or_restore,
9383         .freeze = pqi_freeze,
9384         .thaw = pqi_thaw,
9385         .poweroff = pqi_poweroff,
9386         .restore = pqi_resume_or_restore,
9387 };
9388
9389 #endif /* CONFIG_PM */
9390
9391 /* Define the PCI IDs for the controllers that we support. */
9392 static const struct pci_device_id pqi_pci_id_table[] = {
9393         {
9394                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9395                                0x105b, 0x1211)
9396         },
9397         {
9398                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9399                                0x105b, 0x1321)
9400         },
9401         {
9402                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9403                                0x152d, 0x8a22)
9404         },
9405         {
9406                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9407                                0x152d, 0x8a23)
9408         },
9409         {
9410                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9411                                0x152d, 0x8a24)
9412         },
9413         {
9414                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9415                                0x152d, 0x8a36)
9416         },
9417         {
9418                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9419                                0x152d, 0x8a37)
9420         },
9421         {
9422                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9423                                0x193d, 0x1104)
9424         },
9425         {
9426                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9427                                0x193d, 0x1105)
9428         },
9429         {
9430                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9431                                0x193d, 0x1106)
9432         },
9433         {
9434                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9435                                0x193d, 0x1107)
9436         },
9437         {
9438                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9439                                0x193d, 0x1108)
9440         },
9441         {
9442                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9443                                0x193d, 0x1109)
9444         },
9445         {
9446                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9447                                0x193d, 0x110b)
9448         },
9449         {
9450                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9451                                0x193d, 0x8460)
9452         },
9453         {
9454                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9455                                0x193d, 0x8461)
9456         },
9457         {
9458                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9459                                0x193d, 0xc460)
9460         },
9461         {
9462                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9463                                0x193d, 0xc461)
9464         },
9465         {
9466                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9467                                0x193d, 0xf460)
9468         },
9469         {
9470                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9471                                0x193d, 0xf461)
9472         },
9473         {
9474                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9475                                0x1bd4, 0x0045)
9476         },
9477         {
9478                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9479                                0x1bd4, 0x0046)
9480         },
9481         {
9482                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9483                                0x1bd4, 0x0047)
9484         },
9485         {
9486                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9487                                0x1bd4, 0x0048)
9488         },
9489         {
9490                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9491                                0x1bd4, 0x004a)
9492         },
9493         {
9494                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9495                                0x1bd4, 0x004b)
9496         },
9497         {
9498                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9499                                0x1bd4, 0x004c)
9500         },
9501         {
9502                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9503                                0x1bd4, 0x004f)
9504         },
9505         {
9506                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9507                                0x1bd4, 0x0051)
9508         },
9509         {
9510                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9511                                0x1bd4, 0x0052)
9512         },
9513         {
9514                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9515                                0x1bd4, 0x0053)
9516         },
9517         {
9518                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9519                                0x1bd4, 0x0054)
9520         },
9521         {
9522                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9523                                0x1bd4, 0x006b)
9524         },
9525         {
9526                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9527                                0x1bd4, 0x006c)
9528         },
9529         {
9530                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9531                                0x1bd4, 0x006d)
9532         },
9533         {
9534                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9535                                0x1bd4, 0x006f)
9536         },
9537         {
9538                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9539                                0x1bd4, 0x0070)
9540         },
9541         {
9542                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9543                                0x1bd4, 0x0071)
9544         },
9545         {
9546                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9547                                0x1bd4, 0x0072)
9548         },
9549         {
9550                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9551                                0x1bd4, 0x0086)
9552         },
9553         {
9554                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9555                                0x1bd4, 0x0087)
9556         },
9557         {
9558                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9559                                0x1bd4, 0x0088)
9560         },
9561         {
9562                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9563                                0x1bd4, 0x0089)
9564         },
9565         {
9566                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9567                                0x19e5, 0xd227)
9568         },
9569         {
9570                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9571                                0x19e5, 0xd228)
9572         },
9573         {
9574                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9575                                0x19e5, 0xd229)
9576         },
9577         {
9578                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9579                                0x19e5, 0xd22a)
9580         },
9581         {
9582                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9583                                0x19e5, 0xd22b)
9584         },
9585         {
9586                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9587                                0x19e5, 0xd22c)
9588         },
9589         {
9590                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9591                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9592         },
9593         {
9594                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9595                                PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9596         },
9597         {
9598                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9599                                PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9600         },
9601         {
9602                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9603                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9604         },
9605         {
9606                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9607                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9608         },
9609         {
9610                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9611                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9612         },
9613         {
9614                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9615                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9616         },
9617         {
9618                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9619                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9620         },
9621         {
9622                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9623                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9624         },
9625         {
9626                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9627                                PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9628         },
9629         {
9630                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9631                                PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9632         },
9633         {
9634                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9635                                PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9636         },
9637         {
9638                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9639                                PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9640         },
9641         {
9642                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9643                                PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9644         },
9645         {
9646                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9647                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9648         },
9649         {
9650                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9651                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9652         },
9653         {
9654                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9655                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9656         },
9657         {
9658                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9659                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9660         },
9661         {
9662                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9663                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9664         },
9665         {
9666                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9667                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9668         },
9669         {
9670                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9671                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9672         },
9673         {
9674                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9675                                PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9676         },
9677         {
9678                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9679                                PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9680         },
9681         {
9682                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9683                                PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9684         },
9685         {
9686                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9687                                PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9688         },
9689         {
9690                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9691                                PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9692         },
9693         {
9694                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9695                                PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9696         },
9697         {
9698                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9699                                PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9700         },
9701         {
9702                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9703                                PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9704         },
9705         {
9706                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9707                                PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9708         },
9709         {
9710                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9711                                PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9712         },
9713         {
9714                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9715                                PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9716         },
9717         {
9718                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9719                                PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9720         },
9721         {
9722                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9723                                PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9724         },
9725         {
9726                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9727                                PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9728         },
9729         {
9730                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9731                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9732         },
9733         {
9734                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9735                                PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9736         },
9737         {
9738                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9739                                PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9740         },
9741         {
9742                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9743                                PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9744         },
9745         {
9746                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9747                                PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9748         },
9749         {
9750                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9751                                PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9752         },
9753         {
9754                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9755                                PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9756         },
9757         {
9758                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9759                                PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9760         },
9761         {
9762                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9763                                PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9764         },
9765         {
9766                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9767                                PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9768         },
9769         {
9770                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9771                                PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9772         },
9773         {
9774                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9775                                PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9776         },
9777         {
9778                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9779                                PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9780         },
9781         {
9782                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9783                                PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9784         },
9785         {
9786                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9787                                PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9788         },
9789         {
9790                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9791                                PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9792         },
9793         {
9794                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9795                                PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9796         },
9797         {
9798                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9799                                PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9800         },
9801         {
9802                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9803                                PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9804         },
9805         {
9806                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9807                                PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9808         },
9809         {
9810                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9811                                PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9812         },
9813         {
9814                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9815                                PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9816         },
9817         {
9818                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9819                                PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9820         },
9821         {
9822                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9823                                PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9824         },
9825         {
9826                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9827                                PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9828         },
9829         {
9830                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9831                                PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9832         },
9833         {
9834                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9835                                PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9836         },
9837         {
9838                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9839                                PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9840         },
9841         {
9842                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9843                                PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9844         },
9845         {
9846                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9847                                PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
9848         },
9849         {
9850                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9851                                PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
9852         },
9853         {
9854                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9855                                PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
9856         },
9857         {
9858                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9859                                PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
9860         },
9861         {
9862                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9863                                PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
9864         },
9865         {
9866                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9867                                PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
9868         },
9869         {
9870                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9871                                PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
9872         },
9873         {
9874                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9875                                PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
9876         },
9877         {
9878                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9879                                PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
9880         },
9881         {
9882                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9883                                PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
9884         },
9885         {
9886                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9887                                PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
9888         },
9889         {
9890                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9891                                PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
9892         },
9893         {
9894                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9895                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
9896         },
9897         {
9898                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9899                                PCI_VENDOR_ID_DELL, 0x1fe0)
9900         },
9901         {
9902                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9903                                PCI_VENDOR_ID_HP, 0x0600)
9904         },
9905         {
9906                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9907                                PCI_VENDOR_ID_HP, 0x0601)
9908         },
9909         {
9910                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9911                                PCI_VENDOR_ID_HP, 0x0602)
9912         },
9913         {
9914                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9915                                PCI_VENDOR_ID_HP, 0x0603)
9916         },
9917         {
9918                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9919                                PCI_VENDOR_ID_HP, 0x0609)
9920         },
9921         {
9922                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9923                                PCI_VENDOR_ID_HP, 0x0650)
9924         },
9925         {
9926                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9927                                PCI_VENDOR_ID_HP, 0x0651)
9928         },
9929         {
9930                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9931                                PCI_VENDOR_ID_HP, 0x0652)
9932         },
9933         {
9934                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9935                                PCI_VENDOR_ID_HP, 0x0653)
9936         },
9937         {
9938                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9939                                PCI_VENDOR_ID_HP, 0x0654)
9940         },
9941         {
9942                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9943                                PCI_VENDOR_ID_HP, 0x0655)
9944         },
9945         {
9946                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9947                                PCI_VENDOR_ID_HP, 0x0700)
9948         },
9949         {
9950                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9951                                PCI_VENDOR_ID_HP, 0x0701)
9952         },
9953         {
9954                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9955                                PCI_VENDOR_ID_HP, 0x1001)
9956         },
9957         {
9958                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9959                                PCI_VENDOR_ID_HP, 0x1002)
9960         },
9961         {
9962                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9963                                PCI_VENDOR_ID_HP, 0x1100)
9964         },
9965         {
9966                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9967                                PCI_VENDOR_ID_HP, 0x1101)
9968         },
9969         {
9970                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9971                                0x1590, 0x0294)
9972         },
9973         {
9974                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9975                                0x1590, 0x02db)
9976         },
9977         {
9978                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9979                                0x1590, 0x02dc)
9980         },
9981         {
9982                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9983                                0x1590, 0x032e)
9984         },
9985         {
9986                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9987                                0x1590, 0x036f)
9988         },
9989         {
9990                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9991                                0x1590, 0x0381)
9992         },
9993         {
9994                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9995                                0x1590, 0x0382)
9996         },
9997         {
9998                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9999                                0x1590, 0x0383)
10000         },
10001         {
10002                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10003                                0x1d8d, 0x0800)
10004         },
10005         {
10006                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10007                                0x1d8d, 0x0908)
10008         },
10009         {
10010                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10011                                0x1d8d, 0x0806)
10012         },
10013         {
10014                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10015                                0x1d8d, 0x0916)
10016         },
10017         {
10018                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10019                                PCI_VENDOR_ID_GIGABYTE, 0x1000)
10020         },
10021         {
10022                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10023                                0x1dfc, 0x3161)
10024         },
10025         {
10026                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10027                                0x1f0c, 0x3161)
10028         },
10029         {
10030                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10031                                0x1cf2, 0x0804)
10032         },
10033         {
10034                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10035                                0x1cf2, 0x0805)
10036         },
10037         {
10038                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10039                                0x1cf2, 0x0806)
10040         },
10041         {
10042                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10043                                0x1cf2, 0x5445)
10044         },
10045         {
10046                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10047                                0x1cf2, 0x5446)
10048         },
10049         {
10050                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10051                                0x1cf2, 0x5447)
10052         },
10053         {
10054                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10055                                0x1cf2, 0x5449)
10056         },
10057         {
10058                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10059                                0x1cf2, 0x544a)
10060         },
10061         {
10062                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10063                                0x1cf2, 0x544b)
10064         },
10065         {
10066                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10067                                0x1cf2, 0x544d)
10068         },
10069         {
10070                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10071                                0x1cf2, 0x544e)
10072         },
10073         {
10074                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10075                                0x1cf2, 0x544f)
10076         },
10077         {
10078                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10079                                0x1cf2, 0x54da)
10080         },
10081         {
10082                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10083                                0x1cf2, 0x54db)
10084         },
10085         {
10086                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10087                                0x1cf2, 0x54dc)
10088         },
10089         {
10090                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10091                                0x1cf2, 0x0b27)
10092         },
10093         {
10094                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10095                                0x1cf2, 0x0b29)
10096         },
10097         {
10098                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10099                                0x1cf2, 0x0b45)
10100         },
10101         {
10102                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10103                                0x1cc4, 0x0101)
10104         },
10105         {
10106                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10107                                0x1cc4, 0x0201)
10108         },
10109         {
10110                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10111                                PCI_VENDOR_ID_LENOVO, 0x0220)
10112         },
10113         {
10114                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10115                                PCI_VENDOR_ID_LENOVO, 0x0221)
10116         },
10117         {
10118                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10119                                PCI_VENDOR_ID_LENOVO, 0x0520)
10120         },
10121         {
10122                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10123                                PCI_VENDOR_ID_LENOVO, 0x0522)
10124         },
10125         {
10126                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10127                                PCI_VENDOR_ID_LENOVO, 0x0620)
10128         },
10129         {
10130                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10131                                PCI_VENDOR_ID_LENOVO, 0x0621)
10132         },
10133         {
10134                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10135                                PCI_VENDOR_ID_LENOVO, 0x0622)
10136         },
10137         {
10138                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10139                                PCI_VENDOR_ID_LENOVO, 0x0623)
10140         },
10141         {
10142                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10143                                 0x1014, 0x0718)
10144         },
10145         {
10146                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10147                                 0x1e93, 0x1000)
10148         },
10149         {
10150                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10151                                 0x1e93, 0x1001)
10152         },
10153         {
10154                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10155                                 0x1e93, 0x1002)
10156         },
10157         {
10158                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10159                                 0x1e93, 0x1005)
10160         },
10161         {
10162                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10163                                 0x1f51, 0x1001)
10164         },
10165         {
10166                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10167                                 0x1f51, 0x1002)
10168         },
10169         {
10170                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10171                                 0x1f51, 0x1003)
10172         },
10173         {
10174                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10175                                 0x1f51, 0x1004)
10176         },
10177         {
10178                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10179                                 0x1f51, 0x1005)
10180         },
10181         {
10182                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10183                                 0x1f51, 0x1006)
10184         },
10185         {
10186                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10187                                 0x1f51, 0x1007)
10188         },
10189         {
10190                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10191                                 0x1f51, 0x1008)
10192         },
10193         {
10194                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10195                                 0x1f51, 0x1009)
10196         },
10197         {
10198                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10199                                 0x1f51, 0x100a)
10200         },
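        /*
         * Catch-all entry: match any 0x028f controller whose subsystem
         * vendor/device IDs are not listed explicitly above.
         */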
10201         {
10202                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10203                                PCI_ANY_ID, PCI_ANY_ID)
10204         },
10205         { 0 }
10206 };
10207
10208 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10209
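/*
 * PCI driver glue: probe/remove/shutdown callbacks for the device IDs
 * above, plus power-management ops when CONFIG_PM is enabled.
 */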
10210 static struct pci_driver pqi_pci_driver = {
10211         .name = DRIVER_NAME_SHORT,
10212         .id_table = pqi_pci_id_table,
10213         .probe = pqi_pci_probe,
10214         .remove = pqi_pci_remove,
10215         .shutdown = pqi_shutdown,
10216 #if defined(CONFIG_PM)
10217         .driver = {
10218                 .pm = &pqi_pm_ops
10219         },
10220 #endif
10221 };
10222
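/*
 * Module entry point: calls the structure-layout verification helpers
 * (whose BUILD_BUG_ON() checks are enforced at compile time), attaches
 * the SAS transport template, applies module parameters, and registers
 * the PCI driver; the transport template is released if registration fails.
 */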
10223 static int __init pqi_init(void)
10224 {
10225         int rc;
10226
10227         pr_info(DRIVER_NAME "\n");
10228         pqi_verify_structures();
10229         sis_verify_structures();
10230
10231         pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10232         if (!pqi_sas_transport_template)
10233                 return -ENODEV;
10234
10235         pqi_process_module_params();
10236
10237         rc = pci_register_driver(&pqi_pci_driver);
10238         if (rc)
10239                 sas_release_transport(pqi_sas_transport_template);
10240
10241         return rc;
10242 }
10243
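/* Module exit: unregister the PCI driver and release the SAS transport template. */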
10244 static void __exit pqi_cleanup(void)
10245 {
10246         pci_unregister_driver(&pqi_pci_driver);
10247         sas_release_transport(pqi_sas_transport_template);
10248 }
10249
10250 module_init(pqi_init);
10251 module_exit(pqi_cleanup);
10252
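/*
 * Compile-time sanity checks: BUILD_BUG_ON() verifies that the driver's
 * register, IU, and BMIC structure layouts match the offsets and sizes
 * expected by the controller interface.
 */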
10253 static void pqi_verify_structures(void)
10254 {
10255         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10256                 sis_host_to_ctrl_doorbell) != 0x20);
10257         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10258                 sis_interrupt_mask) != 0x34);
10259         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10260                 sis_ctrl_to_host_doorbell) != 0x9c);
10261         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10262                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
10263         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10264                 sis_driver_scratch) != 0xb0);
10265         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10266                 sis_product_identifier) != 0xb4);
10267         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10268                 sis_firmware_status) != 0xbc);
10269         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10270                 sis_ctrl_shutdown_reason_code) != 0xcc);
10271         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10272                 sis_mailbox) != 0x1000);
10273         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10274                 pqi_registers) != 0x4000);
10275
10276         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10277                 iu_type) != 0x0);
10278         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10279                 iu_length) != 0x2);
10280         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10281                 response_queue_id) != 0x4);
10282         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10283                 driver_flags) != 0x6);
10284         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10285
10286         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10287                 status) != 0x0);
10288         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10289                 service_response) != 0x1);
10290         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10291                 data_present) != 0x2);
10292         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10293                 reserved) != 0x3);
10294         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10295                 residual_count) != 0x4);
10296         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10297                 data_length) != 0x8);
10298         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10299                 reserved1) != 0xa);
10300         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10301                 data) != 0xc);
10302         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10303
10304         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10305                 data_in_result) != 0x0);
10306         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10307                 data_out_result) != 0x1);
10308         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10309                 reserved) != 0x2);
10310         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10311                 status) != 0x5);
10312         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10313                 status_qualifier) != 0x6);
10314         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10315                 sense_data_length) != 0x8);
10316         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10317                 response_data_length) != 0xa);
10318         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10319                 data_in_transferred) != 0xc);
10320         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10321                 data_out_transferred) != 0x10);
10322         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10323                 data) != 0x14);
10324         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10325
10326         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10327                 signature) != 0x0);
10328         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10329                 function_and_status_code) != 0x8);
10330         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10331                 max_admin_iq_elements) != 0x10);
10332         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10333                 max_admin_oq_elements) != 0x11);
10334         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10335                 admin_iq_element_length) != 0x12);
10336         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10337                 admin_oq_element_length) != 0x13);
10338         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10339                 max_reset_timeout) != 0x14);
10340         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10341                 legacy_intx_status) != 0x18);
10342         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10343                 legacy_intx_mask_set) != 0x1c);
10344         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10345                 legacy_intx_mask_clear) != 0x20);
10346         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10347                 device_status) != 0x40);
10348         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10349                 admin_iq_pi_offset) != 0x48);
10350         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10351                 admin_oq_ci_offset) != 0x50);
10352         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10353                 admin_iq_element_array_addr) != 0x58);
10354         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10355                 admin_oq_element_array_addr) != 0x60);
10356         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10357                 admin_iq_ci_addr) != 0x68);
10358         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10359                 admin_oq_pi_addr) != 0x70);
10360         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10361                 admin_iq_num_elements) != 0x78);
10362         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10363                 admin_oq_num_elements) != 0x79);
10364         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10365                 admin_queue_int_msg_num) != 0x7a);
10366         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10367                 device_error) != 0x80);
10368         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10369                 error_details) != 0x88);
10370         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10371                 device_reset) != 0x90);
10372         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10373                 power_action) != 0x94);
10374         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10375
10376         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10377                 header.iu_type) != 0);
10378         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10379                 header.iu_length) != 2);
10380         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10381                 header.driver_flags) != 6);
10382         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10383                 request_id) != 8);
10384         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10385                 function_code) != 10);
10386         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10387                 data.report_device_capability.buffer_length) != 44);
10388         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10389                 data.report_device_capability.sg_descriptor) != 48);
10390         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10391                 data.create_operational_iq.queue_id) != 12);
10392         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10393                 data.create_operational_iq.element_array_addr) != 16);
10394         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10395                 data.create_operational_iq.ci_addr) != 24);
10396         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10397                 data.create_operational_iq.num_elements) != 32);
10398         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10399                 data.create_operational_iq.element_length) != 34);
10400         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10401                 data.create_operational_iq.queue_protocol) != 36);
10402         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10403                 data.create_operational_oq.queue_id) != 12);
10404         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10405                 data.create_operational_oq.element_array_addr) != 16);
10406         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10407                 data.create_operational_oq.pi_addr) != 24);
10408         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10409                 data.create_operational_oq.num_elements) != 32);
10410         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10411                 data.create_operational_oq.element_length) != 34);
10412         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10413                 data.create_operational_oq.queue_protocol) != 36);
10414         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10415                 data.create_operational_oq.int_msg_num) != 40);
10416         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10417                 data.create_operational_oq.coalescing_count) != 42);
10418         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10419                 data.create_operational_oq.min_coalescing_time) != 44);
10420         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10421                 data.create_operational_oq.max_coalescing_time) != 48);
10422         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10423                 data.delete_operational_queue.queue_id) != 12);
10424         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10425         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10426                 data.create_operational_iq) != 64 - 11);
10427         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10428                 data.create_operational_oq) != 64 - 11);
10429         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10430                 data.delete_operational_queue) != 64 - 11);
10431
10432         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10433                 header.iu_type) != 0);
10434         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10435                 header.iu_length) != 2);
10436         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10437                 header.driver_flags) != 6);
10438         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10439                 request_id) != 8);
10440         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10441                 function_code) != 10);
10442         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10443                 status) != 11);
10444         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10445                 data.create_operational_iq.status_descriptor) != 12);
10446         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10447                 data.create_operational_iq.iq_pi_offset) != 16);
10448         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10449                 data.create_operational_oq.status_descriptor) != 12);
10450         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10451                 data.create_operational_oq.oq_ci_offset) != 16);
10452         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10453
10454         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10455                 header.iu_type) != 0);
10456         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10457                 header.iu_length) != 2);
10458         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10459                 header.response_queue_id) != 4);
10460         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10461                 header.driver_flags) != 6);
10462         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10463                 request_id) != 8);
10464         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10465                 nexus_id) != 10);
10466         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10467                 buffer_length) != 12);
10468         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10469                 lun_number) != 16);
10470         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10471                 protocol_specific) != 24);
10472         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10473                 error_index) != 27);
10474         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10475                 cdb) != 32);
10476         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10477                 timeout) != 60);
10478         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10479                 sg_descriptors) != 64);
10480         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10481                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10482
10483         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10484                 header.iu_type) != 0);
10485         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10486                 header.iu_length) != 2);
10487         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10488                 header.response_queue_id) != 4);
10489         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10490                 header.driver_flags) != 6);
10491         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10492                 request_id) != 8);
10493         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10494                 nexus_id) != 12);
10495         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10496                 buffer_length) != 16);
10497         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10498                 data_encryption_key_index) != 22);
10499         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10500                 encrypt_tweak_lower) != 24);
10501         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10502                 encrypt_tweak_upper) != 28);
10503         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10504                 cdb) != 32);
10505         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10506                 error_index) != 48);
10507         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10508                 num_sg_descriptors) != 50);
10509         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10510                 cdb_length) != 51);
10511         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10512                 lun_number) != 52);
10513         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10514                 sg_descriptors) != 64);
10515         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10516                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10517
10518         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10519                 header.iu_type) != 0);
10520         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10521                 header.iu_length) != 2);
10522         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10523                 request_id) != 8);
10524         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10525                 error_index) != 10);
10526
10527         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10528                 header.iu_type) != 0);
10529         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10530                 header.iu_length) != 2);
10531         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10532                 header.response_queue_id) != 4);
10533         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10534                 request_id) != 8);
10535         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10536                 data.report_event_configuration.buffer_length) != 12);
10537         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10538                 data.report_event_configuration.sg_descriptors) != 16);
10539         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10540                 data.set_event_configuration.global_event_oq_id) != 10);
10541         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10542                 data.set_event_configuration.buffer_length) != 12);
10543         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10544                 data.set_event_configuration.sg_descriptors) != 16);
10545
10546         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10547                 max_inbound_iu_length) != 6);
10548         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10549                 max_outbound_iu_length) != 14);
10550         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10551
10552         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10553                 data_length) != 0);
10554         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10555                 iq_arbitration_priority_support_bitmask) != 8);
10556         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10557                 maximum_aw_a) != 9);
10558         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10559                 maximum_aw_b) != 10);
10560         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10561                 maximum_aw_c) != 11);
10562         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10563                 max_inbound_queues) != 16);
10564         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10565                 max_elements_per_iq) != 18);
10566         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10567                 max_iq_element_length) != 24);
10568         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10569                 min_iq_element_length) != 26);
10570         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10571                 max_outbound_queues) != 30);
10572         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10573                 max_elements_per_oq) != 32);
10574         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10575                 intr_coalescing_time_granularity) != 34);
10576         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10577                 max_oq_element_length) != 36);
10578         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10579                 min_oq_element_length) != 38);
10580         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10581                 iu_layer_descriptors) != 64);
10582         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10583
10584         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10585                 event_type) != 0);
10586         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10587                 oq_id) != 2);
10588         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10589
10590         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10591                 num_event_descriptors) != 2);
10592         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10593                 descriptors) != 4);
10594
10595         BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10596                 ARRAY_SIZE(pqi_supported_event_types));
10597
10598         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10599                 header.iu_type) != 0);
10600         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10601                 header.iu_length) != 2);
10602         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10603                 event_type) != 8);
10604         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10605                 event_id) != 10);
10606         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10607                 additional_event_id) != 12);
10608         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10609                 data) != 16);
10610         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10611
10612         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10613                 header.iu_type) != 0);
10614         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10615                 header.iu_length) != 2);
10616         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10617                 event_type) != 8);
10618         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10619                 event_id) != 10);
10620         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10621                 additional_event_id) != 12);
10622         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10623
10624         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10625                 header.iu_type) != 0);
10626         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10627                 header.iu_length) != 2);
10628         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10629                 request_id) != 8);
10630         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10631                 nexus_id) != 10);
10632         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10633                 timeout) != 14);
10634         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10635                 lun_number) != 16);
10636         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10637                 protocol_specific) != 24);
10638         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10639                 outbound_queue_id_to_manage) != 26);
10640         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10641                 request_id_to_manage) != 28);
10642         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10643                 task_management_function) != 30);
10644         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10645
10646         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10647                 header.iu_type) != 0);
10648         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10649                 header.iu_length) != 2);
10650         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10651                 request_id) != 8);
10652         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10653                 nexus_id) != 10);
10654         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10655                 additional_response_info) != 12);
10656         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10657                 response_code) != 15);
10658         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10659
10660         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10661                 configured_logical_drive_count) != 0);
10662         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10663                 configuration_signature) != 1);
10664         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10665                 firmware_version_short) != 5);
10666         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10667                 extended_logical_unit_count) != 154);
10668         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10669                 firmware_build_number) != 190);
10670         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10671                 vendor_id) != 200);
10672         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10673                 product_id) != 208);
10674         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10675                 extra_controller_flags) != 286);
10676         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10677                 controller_mode) != 292);
10678         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10679                 spare_part_number) != 293);
10680         BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
10681                 firmware_version_long) != 325);
10682
10683         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10684                 phys_bay_in_box) != 115);
10685         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10686                 device_type) != 120);
10687         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10688                 redundant_path_present_map) != 1736);
10689         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10690                 active_path_number) != 1738);
10691         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10692                 alternate_paths_phys_connector) != 1739);
10693         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10694                 alternate_paths_phys_box_on_port) != 1755);
10695         BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
10696                 current_queue_depth_limit) != 1796);
10697         BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
10698
10699         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
10700         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10701                 page_code) != 0);
10702         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10703                 subpage_code) != 1);
10704         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
10705                 buffer_length) != 2);
10706
10707         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
10708         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10709                 page_code) != 0);
10710         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10711                 subpage_code) != 1);
10712         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
10713                 page_length) != 2);
10714
10715         BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
10716                 != 18);
10717         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10718                 header) != 0);
10719         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10720                 firmware_read_support) != 4);
10721         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10722                 driver_read_support) != 5);
10723         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10724                 firmware_write_support) != 6);
10725         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10726                 driver_write_support) != 7);
10727         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10728                 max_transfer_encrypted_sas_sata) != 8);
10729         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10730                 max_transfer_encrypted_nvme) != 10);
10731         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10732                 max_write_raid_5_6) != 12);
10733         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10734                 max_write_raid_1_10_2drive) != 14);
10735         BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
10736                 max_write_raid_1_10_3drive) != 16);
10737
10738         BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
10739         BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
10740         BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
10741                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10742         BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
10743                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10744         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
10745         BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
10746                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10747         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
10748         BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
10749                 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
10750
10751         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
10752         BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
10753                 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
10754 }