// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION          "2.1.30-031"
#define DRIVER_MAJOR            2
#define DRIVER_MINOR            1
#define DRIVER_RELEASE          30
#define DRIVER_REVISION         31

#define DRIVER_NAME             "Microchip SmartPQI Driver (v" \
                                DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT       "smartpqi"

#define PQI_EXTRA_SGL_MEMORY    (12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS                       5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS      10

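/* Sentinel pointer value: non-NULL, but never a valid kernel address. */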
#define PQI_NO_COMPLETION       ((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
        DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

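/*
 * Per-command private data; the SCSI midlayer allocates this alongside each
 * struct scsi_cmnd and hands it back via scsi_cmd_priv().
 */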
struct pqi_cmd_priv {
        int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
        return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
        enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_queue_group *queue_group, enum pqi_io_path path,
        struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
        struct pqi_iu_header *request, unsigned int flags,
        struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
        unsigned int cdb_length, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
        struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
        struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
        struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
        struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE    0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
        NONE,
        REBOOT,
        PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

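/* Maps the "lockup_action" module-parameter strings to enum values. */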
static struct {
        enum pqi_lockup_action  action;
        char                    *name;
} pqi_lockup_actions[] = {
        {
                .action = NONE,
                .name = "none",
        },
        {
                .action = REBOOT,
                .name = "reboot",
        },
        {
                .action = PANIC,
                .name = "panic",
        },
};

static unsigned int pqi_supported_event_types[] = {
        PQI_EVENT_TYPE_HOTPLUG,
        PQI_EVENT_TYPE_HARDWARE,
        PQI_EVENT_TYPE_PHYSICAL_DEVICE,
        PQI_EVENT_TYPE_LOGICAL_DEVICE,
        PQI_EVENT_TYPE_OFA,
        PQI_EVENT_TYPE_AIO_STATE_CHANGE,
        PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
        pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
        "Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
        pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
        "Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
        pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
        "Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
        pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
        "\t\tSupported: none, reboot, panic\n"
        "\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
        pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
        pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
        pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
        "Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
        pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
        "Timeout in seconds for driver to wait for controller ready.");

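/*
 * Example (hypothetical values): any of the module parameters above can be
 * set at load time, e.g.:
 *
 *      modprobe smartpqi lockup_action=reboot ctrl_ready_timeout=180
 */
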
static char *raid_levels[] = {
        "RAID-0",
        "RAID-4",
        "RAID-1(1+0)",
        "RAID-5",
        "RAID-5+1",
        "RAID-6",
        "RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
        if (raid_level < ARRAY_SIZE(raid_levels))
                return raid_levels[raid_level];

        return "RAID UNKNOWN";
}

#define SA_RAID_0               0
#define SA_RAID_4               1
#define SA_RAID_1               2       /* also used for RAID 10 */
#define SA_RAID_5               3       /* also used for RAID 50 */
#define SA_RAID_51              4
#define SA_RAID_6               5       /* also used for RAID 60 */
#define SA_RAID_TRIPLE          6       /* also used for RAID 1+0 Triple */
#define SA_RAID_MAX             SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN         0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
        pqi_prep_for_scsi_done(scmd);
        scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
        sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
        return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
        return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
        return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
        return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
        if (ctrl_info->controller_online)
                if (!sis_is_firmware_running(ctrl_info))
                        pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
        return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE                     0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED          0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
        return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
        enum pqi_ctrl_mode mode)
{
        u32 driver_scratch;

        driver_scratch = sis_read_driver_scratch(ctrl_info);

        if (mode == PQI_MODE)
                driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
        else
                driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

        sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
        return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
        u32 driver_scratch;

        driver_scratch = sis_read_driver_scratch(ctrl_info);

        if (is_supported)
                driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
        else
                driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

        sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->scan_blocked = true;
        mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->scan_blocked = false;
        mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
        return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
        mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
        struct Scsi_Host *shost;
        unsigned int num_loops;
        int msecs_sleep;

        shost = ctrl_info->scsi_host;

        scsi_block_requests(shost);

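        /*
         * Poll until the host reports no busy commands, backing off from
         * 20 ms per poll to 500 ms after the first ten iterations.
         */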
        num_loops = 0;
        msecs_sleep = 20;
        while (scsi_host_busy(shost)) {
                num_loops++;
                if (num_loops == 10)
                        msecs_sleep = 500;
                msleep(msecs_sleep);
        }
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
        scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
        atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
        atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
        return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
        ctrl_info->block_requests = false;
        wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
        if (!pqi_ctrl_blocked(ctrl_info))
                return;

        atomic_inc(&ctrl_info->num_blocked_threads);
        wait_event(ctrl_info->block_requests_wait,
                !pqi_ctrl_blocked(ctrl_info));
        atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS                10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
        unsigned long start_jiffies;
        unsigned long warning_timeout;
        bool displayed_warning;

        displayed_warning = false;
        start_jiffies = jiffies;
        warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

        while (atomic_read(&ctrl_info->num_busy_threads) >
                atomic_read(&ctrl_info->num_blocked_threads)) {
                if (time_after(jiffies, warning_timeout)) {
                        dev_warn(&ctrl_info->pci_dev->dev,
                                "waiting %u seconds for driver activity to quiesce\n",
                                jiffies_to_msecs(jiffies - start_jiffies) / 1000);
                        displayed_warning = true;
                        warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
                }
                usleep_range(1000, 2000);
        }

        if (displayed_warning)
                dev_warn(&ctrl_info->pci_dev->dev,
                        "driver activity quiesced after waiting for %u seconds\n",
                        jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
        return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
        mutex_unlock(&ctrl_info->ofa_mutex);
}

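/*
 * Taking and immediately dropping ofa_mutex simply blocks until any
 * in-progress Online Firmware Activation (which holds the mutex) completes.
 */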
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
        mutex_lock(&ctrl_info->ofa_mutex);
        mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
        return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
        device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
        return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
        device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
        device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
        return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
        int index;

        for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
                if (event_type == pqi_supported_event_types[index])
                        return index;

        return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
        return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
        unsigned long delay)
{
        if (pqi_ctrl_offline(ctrl_info))
                return;

        schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
        pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY   (10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
        pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
        cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
        if (!ctrl_info->heartbeat_counter)
                return 0;

        return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
        return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
        u8 status;

        status = pqi_read_soft_reset_status(ctrl_info);
        status &= ~PQI_SOFT_RESET_ABORT;
        writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
        bool io_high_prio;
        int priority_class;

        io_high_prio = false;

        if (device->ncq_prio_enable) {
                priority_class =
                        IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
                if (priority_class == IOPRIO_CLASS_RT) {
                        /* Set NCQ priority for read/write commands. */
                        switch (scmd->cmnd[0]) {
                        case WRITE_16:
                        case READ_16:
                        case WRITE_12:
                        case READ_12:
                        case WRITE_10:
                        case READ_10:
                        case WRITE_6:
                        case READ_6:
                                io_high_prio = true;
                                break;
                        }
                }
        }

        return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
        size_t buffer_length, enum dma_data_direction data_direction)
{
        dma_addr_t bus_address;

        if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
                return 0;

        bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
                data_direction);
        if (dma_mapping_error(&pci_dev->dev, bus_address))
                return -ENOMEM;

        put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
        put_unaligned_le32(buffer_length, &sg_descriptor->length);
        put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

        return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *descriptors, int num_descriptors,
        enum dma_data_direction data_direction)
{
        int i;

        if (data_direction == DMA_NONE)
                return;

        for (i = 0; i < num_descriptors; i++)
                dma_unmap_single(&pci_dev->dev,
                        (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
                        get_unaligned_le32(&descriptors[i].length),
                        data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_path_request *request, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length,
        u16 vpd_page, enum dma_data_direction *dir)
{
        u8 *cdb;
        size_t cdb_length = buffer_length;

        memset(request, 0, sizeof(*request));

        request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
        put_unaligned_le16(offsetof(struct pqi_raid_path_request,
                sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
                &request->header.iu_length);
        put_unaligned_le32(buffer_length, &request->buffer_length);
        memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

        cdb = request->cdb;

        switch (cmd) {
        case INQUIRY:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = INQUIRY;
                if (vpd_page & VPD_PAGE) {
                        cdb[1] = 0x1;
                        cdb[2] = (u8)vpd_page;
                }
                cdb[4] = (u8)cdb_length;
                break;
        case CISS_REPORT_LOG:
        case CISS_REPORT_PHYS:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = cmd;
                if (cmd == CISS_REPORT_PHYS) {
                        if (ctrl_info->rpl_extended_format_4_5_supported)
                                cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
                        else
                                cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
                } else {
                        cdb[1] = ctrl_info->ciss_report_log_flags;
                }
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case CISS_GET_RAID_MAP:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = CISS_READ;
                cdb[1] = CISS_GET_RAID_MAP;
                put_unaligned_be32(cdb_length, &cdb[6]);
                break;
        case SA_FLUSH_CACHE:
                request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_FLUSH_CACHE;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_SENSE_DIAG_OPTIONS:
                cdb_length = 0;
                fallthrough;
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
        case BMIC_SENSE_SUBSYSTEM_INFORMATION:
        case BMIC_SENSE_FEATURE:
                request->data_direction = SOP_READ_FLAG;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_SET_DIAG_OPTIONS:
                cdb_length = 0;
                fallthrough;
        case BMIC_WRITE_HOST_WELLNESS:
                request->data_direction = SOP_WRITE_FLAG;
                cdb[0] = BMIC_WRITE;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        case BMIC_CSMI_PASSTHRU:
                request->data_direction = SOP_BIDIRECTIONAL;
                cdb[0] = BMIC_WRITE;
                cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
                cdb[6] = cmd;
                put_unaligned_be16(cdb_length, &cdb[7]);
                break;
        default:
                dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%02x\n", cmd);
                break;
        }

        switch (request->data_direction) {
        case SOP_READ_FLAG:
                *dir = DMA_FROM_DEVICE;
                break;
        case SOP_WRITE_FLAG:
                *dir = DMA_TO_DEVICE;
                break;
        case SOP_NO_DIRECTION_FLAG:
                *dir = DMA_NONE;
                break;
        default:
                *dir = DMA_BIDIRECTIONAL;
                break;
        }

        return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
                buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
        io_request->scmd = NULL;
        io_request->status = 0;
        io_request->error_info = NULL;
        io_request->raid_bypass = false;
}

static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
        struct pqi_io_request *io_request;
        u16 i;

        if (scmd) { /* SML I/O request */
                u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

                i = blk_mq_unique_tag_to_tag(blk_tag);
                io_request = &ctrl_info->io_request_pool[i];
                if (atomic_inc_return(&io_request->refcount) > 1) {
                        atomic_dec(&io_request->refcount);
                        return NULL;
                }
        } else { /* IOCTL or driver internal request */
                /*
                 * benignly racy - may have to wait for an open slot.
                 * command slot range is scsi_ml_can_queue -
                 *         [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
                 */
                i = 0;
                while (1) {
                        io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
                        if (atomic_inc_return(&io_request->refcount) == 1)
                                break;
                        atomic_dec(&io_request->refcount);
                        i = (i + 1) % PQI_RESERVED_IO_SLOTS;
                }
        }

        if (io_request)
                pqi_reinit_io_request(io_request);

        return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
        atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
        struct pqi_raid_error_info *error_info)
{
        int rc;
        struct pqi_raid_path_request request;
        enum dma_data_direction dir;

        rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
                buffer, buffer_length, vpd_page, &dir);
        if (rc)
                return rc;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
        u8 cmd, void *buffer, size_t buffer_length)
{
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
                buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
        u8 cmd, void *buffer, size_t buffer_length,
        struct pqi_raid_error_info *error_info)
{
        return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
                buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
        struct bmic_identify_controller *buffer)
{
        return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
                buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
        struct bmic_sense_subsystem_info *sense_info)
{
        return pqi_send_ctrl_raid_request(ctrl_info,
                BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
                sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
        return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
                buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device,
        struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
        int rc;
        enum dma_data_direction dir;
        u16 bmic_device_index;
        struct pqi_raid_path_request request;

        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
                buffer_length, 0, &dir);
        if (rc)
                return rc;

        bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
        request.cdb[2] = (u8)bmic_device_index;
        request.cdb[9] = (u8)(bmic_device_index >> 8);

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        return rc;
}

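/* BMIC sense-feature limits are reported in 1 KiB units; 0 means unlimited. */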
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
        u32 bytes;

        bytes = get_unaligned_le16(limit);
        if (bytes == 0)
                bytes = ~0;
        else
                bytes *= 1024;

        return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
        struct bmic_sense_feature_buffer_header header;
        struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

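/*
 * Minimum acceptable lengths for the sense-feature response: the firmware
 * may return a shorter structure, but all fields up through
 * max_write_raid_1_10_3drive must be present for the driver to use it.
 */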
#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH       \
        offsetofend(struct bmic_sense_feature_buffer, \
                aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH      \
        (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
                max_write_raid_1_10_3drive) - \
                sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        enum dma_data_direction dir;
        struct pqi_raid_path_request request;
        struct bmic_sense_feature_buffer *buffer;

        buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
                buffer, sizeof(*buffer), 0, &dir);
        if (rc)
                goto error;

        request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
        request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

        pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

        if (rc)
                goto error;

        if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
                buffer->header.subpage_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
                get_unaligned_le16(&buffer->header.buffer_length) <
                        MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
                buffer->aio_subpage.header.page_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE ||
                buffer->aio_subpage.header.subpage_code !=
                        BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
                get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
                        MINIMUM_AIO_SUBPAGE_LENGTH) {
                goto error;
        }

        ctrl_info->max_transfer_encrypted_sas_sata =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_transfer_encrypted_sas_sata);

        ctrl_info->max_transfer_encrypted_nvme =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_transfer_encrypted_nvme);

        ctrl_info->max_write_raid_5_6 =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_5_6);

        ctrl_info->max_write_raid_1_10_2drive =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_1_10_2drive);

        ctrl_info->max_write_raid_1_10_3drive =
                pqi_aio_limit_to_bytes(
                        &buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
        kfree(buffer);

        return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
        enum bmic_flush_cache_shutdown_event shutdown_event)
{
        int rc;
        struct bmic_flush_cache *flush_cache;

        flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
        if (!flush_cache)
                return -ENOMEM;

        flush_cache->shutdown_event = shutdown_event;

        rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
                sizeof(*flush_cache));

        kfree(flush_cache);

        return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
        struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
        struct pqi_raid_error_info *error_info)
{
        return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
                buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA           (1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_diag_options *diag;

        diag = kzalloc(sizeof(*diag), GFP_KERNEL);
        if (!diag)
                return -ENOMEM;

        rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
                diag, sizeof(*diag));
        if (rc)
                goto out;

        diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

        rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
                sizeof(*diag));

out:
        kfree(diag);

        return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
        void *buffer, size_t buffer_length)
{
        return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
                buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
        u8      start_tag[4];
        u8      driver_version_tag[2];
        __le16  driver_version_length;
        char    driver_version[32];
        u8      dont_write_tag[2];
        u8      end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_driver_version *buffer;
        size_t buffer_length;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->driver_version_tag[0] = 'D';
        buffer->driver_version_tag[1] = 'V';
        put_unaligned_le16(sizeof(buffer->driver_version),
                &buffer->driver_version_length);
        strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
                sizeof(buffer->driver_version));
        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
        u8      start_tag[4];
        u8      time_tag[2];
        __le16  time_length;
        u8      time[8];
        u8      dont_write_tag[2];
        u8      end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
        struct pqi_ctrl_info *ctrl_info)
{
        int rc;
        struct bmic_host_wellness_time *buffer;
        size_t buffer_length;
        time64_t local_time;
        unsigned int year;
        struct tm tm;

        buffer_length = sizeof(*buffer);

        buffer = kmalloc(buffer_length, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->start_tag[0] = '<';
        buffer->start_tag[1] = 'H';
        buffer->start_tag[2] = 'W';
        buffer->start_tag[3] = '>';
        buffer->time_tag[0] = 'T';
        buffer->time_tag[1] = 'D';
        put_unaligned_le16(sizeof(buffer->time),
                &buffer->time_length);

        local_time = ktime_get_real_seconds();
        time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
        year = tm.tm_year + 1900;

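        /*
         * The wellness time payload is BCD-encoded:
         * hour, minute, second, reserved, month, day, century, year.
         */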
        buffer->time[0] = bin2bcd(tm.tm_hour);
        buffer->time[1] = bin2bcd(tm.tm_min);
        buffer->time[2] = bin2bcd(tm.tm_sec);
        buffer->time[3] = 0;
        buffer->time[4] = bin2bcd(tm.tm_mon + 1);
        buffer->time[5] = bin2bcd(tm.tm_mday);
        buffer->time[6] = bin2bcd(year / 100);
        buffer->time[7] = bin2bcd(year % 100);

        buffer->dont_write_tag[0] = 'D';
        buffer->dont_write_tag[1] = 'W';
        buffer->end_tag[0] = 'Z';
        buffer->end_tag[1] = 'Z';

        rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

        kfree(buffer);

        return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL   (24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
        int rc;
        struct pqi_ctrl_info *ctrl_info;

        ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
                update_time_work);

        rc = pqi_write_current_time_to_host_wellness(ctrl_info);
        if (rc)
                dev_warn(&ctrl_info->pci_dev->dev,
                        "error updating time on controller\n");

        schedule_delayed_work(&ctrl_info->update_time_work,
                PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
        schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
        cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
        size_t buffer_length)
{
        return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

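/*
 * Issue a CISS report LUNs command in two passes: first fetch just the
 * header to learn the list length, then fetch the full list. If the list
 * grew between the two reads (e.g. due to hotplug), retry with the new
 * length.
 */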
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
        int rc;
        size_t lun_list_length;
        size_t lun_data_length;
        size_t new_lun_list_length;
        void *lun_data = NULL;
        struct report_lun_header *report_lun_header;

        report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
        if (!report_lun_header) {
                rc = -ENOMEM;
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
        if (rc)
                goto out;

        lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
        lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

        lun_data = kmalloc(lun_data_length, GFP_KERNEL);
        if (!lun_data) {
                rc = -ENOMEM;
                goto out;
        }

        if (lun_list_length == 0) {
                memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
                goto out;
        }

        rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
        if (rc)
                goto out;

        new_lun_list_length =
                get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

        if (new_lun_list_length > lun_list_length) {
                lun_list_length = new_lun_list_length;
                kfree(lun_data);
                goto again;
        }

out:
        kfree(report_lun_header);

        if (rc) {
                kfree(lun_data);
                lun_data = NULL;
        }

        *buffer = lun_data;

        return rc;
}

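/*
 * Fetch the physical LUN list. If the controller returns the older extended
 * format 2 (8-byte WWIDs), convert it to the 16-byte-WWID layout the rest of
 * the driver expects, zero-filling the upper eight WWID bytes.
 */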
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
        int rc;
        unsigned int i;
        u8 rpl_response_format;
        u32 num_physicals;
        void *rpl_list;
        struct report_lun_header *rpl_header;
        struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
        struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

        rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
        if (rc)
                return rc;

        if (ctrl_info->rpl_extended_format_4_5_supported) {
                rpl_header = rpl_list;
                rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
                if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
                        *buffer = rpl_list;
                        return 0;
                } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
                        dev_err(&ctrl_info->pci_dev->dev,
                                "RPL returned unsupported data format %u\n",
                                rpl_response_format);
                        return -EINVAL;
                } else {
                        dev_warn(&ctrl_info->pci_dev->dev,
                                "RPL returned extended format 2 instead of 4\n");
                }
        }

        rpl_8byte_wwid_list = rpl_list;
        num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

        rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
                                                   num_physicals), GFP_KERNEL);
        if (!rpl_16byte_wwid_list)
                return -ENOMEM;

        put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
                &rpl_16byte_wwid_list->header.list_length);
        rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

        for (i = 0; i < num_physicals; i++) {
                memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
                memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
                memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
                rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
                rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
                rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
                rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
                rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
        }

        kfree(rpl_8byte_wwid_list);
        *buffer = rpl_16byte_wwid_list;

        return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
        return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
        struct report_phys_lun_16byte_wwid_list **physdev_list,
        struct report_log_lun_list **logdev_list)
{
        int rc;
        size_t logdev_list_length;
        size_t logdev_data_length;
        struct report_log_lun_list *internal_logdev_list;
        struct report_log_lun_list *logdev_data;
        struct report_lun_header report_lun_header;

        rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report physical LUNs failed\n");

        rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
        if (rc)
                dev_err(&ctrl_info->pci_dev->dev,
                        "report logical LUNs failed\n");

        /*
         * Tack the controller itself onto the end of the logical device list
         * by adding a list entry that is all zeros.
         */

        logdev_data = *logdev_list;

        if (logdev_data) {
                logdev_list_length =
                        get_unaligned_be32(&logdev_data->header.list_length);
        } else {
                memset(&report_lun_header, 0, sizeof(report_lun_header));
                logdev_data =
                        (struct report_log_lun_list *)&report_lun_header;
                logdev_list_length = 0;
        }

        logdev_data_length = sizeof(struct report_lun_header) +
                logdev_list_length;

        internal_logdev_list = kmalloc(logdev_data_length +
                sizeof(struct report_log_lun), GFP_KERNEL);
        if (!internal_logdev_list) {
                kfree(*logdev_list);
                *logdev_list = NULL;
                return -ENOMEM;
        }

        memcpy(internal_logdev_list, logdev_data, logdev_data_length);
        memset((u8 *)internal_logdev_list + logdev_data_length, 0,
                sizeof(struct report_log_lun));
        put_unaligned_be32(logdev_list_length +
                sizeof(struct report_log_lun),
                &internal_logdev_list->header.list_length);

        kfree(*logdev_list);
        *logdev_list = internal_logdev_list;

        return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
        int bus, int target, int lun)
{
        device->bus = bus;
        device->target = target;
        device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
        u8 *scsi3addr;
        u32 lunid;
        int bus;
        int target;
        int lun;

        scsi3addr = device->scsi3addr;
        lunid = get_unaligned_le32(scsi3addr);

        if (pqi_is_hba_lunid(scsi3addr)) {
                /* The specified device is the controller. */
                pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
                device->target_lun_valid = true;
                return;
        }

        if (pqi_is_logical_device(device)) {
                if (device->is_external_raid_device) {
                        bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
                        target = (lunid >> 16) & 0x3fff;
                        lun = lunid & 0xff;
                } else {
                        bus = PQI_RAID_VOLUME_BUS;
                        target = 0;
                        lun = lunid & 0x3fff;
                }
                pqi_set_bus_target_lun(device, bus, target, lun);
                device->target_lun_valid = true;
                return;
        }

        /*
         * Defer target and LUN assignment for non-controller physical devices
         * because the SAS transport layer will make these assignments later.
         */
        pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        u8 raid_level;
        u8 *buffer;

        raid_level = SA_RAID_UNKNOWN;

        buffer = kmalloc(64, GFP_KERNEL);
        if (buffer) {
                rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
                        VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
                if (rc == 0) {
                        raid_level = buffer[8];
                        if (raid_level > SA_RAID_MAX)
                                raid_level = SA_RAID_UNKNOWN;
                }
                kfree(buffer);
        }

        device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
        char *err_msg;
        u32 raid_map_size;
        u32 r5or6_blocks_per_row;

        raid_map_size = get_unaligned_le32(&raid_map->structure_size);

        if (raid_map_size < offsetof(struct raid_map, disk_data)) {
                err_msg = "RAID map too small";
                goto bad_raid_map;
        }

        if (device->raid_level == SA_RAID_1) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
                        err_msg = "invalid RAID-1 map";
                        goto bad_raid_map;
                }
        } else if (device->raid_level == SA_RAID_TRIPLE) {
                if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
                        err_msg = "invalid RAID-1(Triple) map";
                        goto bad_raid_map;
                }
        } else if ((device->raid_level == SA_RAID_5 ||
                device->raid_level == SA_RAID_6) &&
                get_unaligned_le16(&raid_map->layout_map_count) > 1) {
                /* RAID 50/60 */
                r5or6_blocks_per_row =
                        get_unaligned_le16(&raid_map->strip_size) *
                        get_unaligned_le16(&raid_map->data_disks_per_row);
                if (r5or6_blocks_per_row == 0) {
                        err_msg = "invalid RAID-5 or RAID-6 map";
                        goto bad_raid_map;
                }
        }

        return 0;

bad_raid_map:
        dev_warn(&ctrl_info->pci_dev->dev,
                "logical device %08x%08x %s\n",
                *((u32 *)&device->scsi3addr),
                *((u32 *)&device->scsi3addr[4]), err_msg);

        return -EINVAL;
}

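/*
 * Fetch the RAID map in up to two steps: read into a base-sized buffer
 * first, and if the map reports a larger structure_size, reallocate and
 * reread, verifying that the reported size is stable.
 */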
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        u32 raid_map_size;
        struct raid_map *raid_map;

        raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
        if (!raid_map)
                return -ENOMEM;

        rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
                device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
        if (rc)
                goto error;

        raid_map_size = get_unaligned_le32(&raid_map->structure_size);

        if (raid_map_size > sizeof(*raid_map)) {

                kfree(raid_map);

                raid_map = kmalloc(raid_map_size, GFP_KERNEL);
                if (!raid_map)
                        return -ENOMEM;

                rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
                        device->scsi3addr, raid_map, raid_map_size, 0, NULL);
                if (rc)
                        goto error;

                if (get_unaligned_le32(&raid_map->structure_size)
                        != raid_map_size) {
                        dev_warn(&ctrl_info->pci_dev->dev,
                                "requested %u bytes, received %u bytes\n",
                                raid_map_size,
                                get_unaligned_le32(&raid_map->structure_size));
                        rc = -EINVAL;
                        goto error;
                }
        }

        rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
        if (rc)
                goto error;

        device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
        if (!device->raid_io_stats) {
                rc = -ENOMEM;
                goto error;
        }

        device->raid_map = raid_map;

        return 0;

error:
        kfree(raid_map);

        return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        if (!ctrl_info->lv_drive_type_mix_valid) {
                device->max_transfer_encrypted = ~0;
                return;
        }

        switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
        case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
        case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
        case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
        case LV_DRIVE_TYPE_MIX_SAS_ONLY:
        case LV_DRIVE_TYPE_MIX_SATA_ONLY:
                device->max_transfer_encrypted =
                        ctrl_info->max_transfer_encrypted_sas_sata;
                break;
        case LV_DRIVE_TYPE_MIX_NVME_ONLY:
                device->max_transfer_encrypted =
                        ctrl_info->max_transfer_encrypted_nvme;
                break;
        case LV_DRIVE_TYPE_MIX_UNKNOWN:
        case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
        default:
                device->max_transfer_encrypted =
                        min(ctrl_info->max_transfer_encrypted_sas_sata,
                                ctrl_info->max_transfer_encrypted_nvme);
                break;
        }
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        u8 *buffer;
        u8 bypass_status;

        buffer = kmalloc(64, GFP_KERNEL);
        if (!buffer)
                return;

        rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
                VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
        if (rc)
                goto out;

#define RAID_BYPASS_STATUS              4
#define RAID_BYPASS_CONFIGURED          0x1
#define RAID_BYPASS_ENABLED             0x2

        bypass_status = buffer[RAID_BYPASS_STATUS];
        device->raid_bypass_configured =
                (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
        if (device->raid_bypass_configured &&
                (bypass_status & RAID_BYPASS_ENABLED) &&
                pqi_get_raid_map(ctrl_info, device) == 0) {
                device->raid_bypass_enabled = true;
                if (get_unaligned_le16(&device->raid_map->flags) &
                        RAID_MAP_ENCRYPTION_ENABLED)
                        pqi_set_max_transfer_encrypted(ctrl_info, device);
        }

out:
        kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
{
        int rc;
        size_t page_length;
        u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
        bool volume_offline = true;
1607         u32 volume_flags;
1608         struct ciss_vpd_logical_volume_status *vpd;
1609
1610         vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
1611         if (!vpd)
1612                 goto no_buffer;
1613
1614         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
1615                 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
1616         if (rc)
1617                 goto out;
1618
1619         if (vpd->page_code != CISS_VPD_LV_STATUS)
1620                 goto out;
1621
1622         page_length = offsetof(struct ciss_vpd_logical_volume_status,
1623                 volume_status) + vpd->page_length;
1624         if (page_length < sizeof(*vpd))
1625                 goto out;
1626
1627         volume_status = vpd->volume_status;
1628         volume_flags = get_unaligned_be32(&vpd->flags);
1629         volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
1630
1631 out:
1632         kfree(vpd);
1633 no_buffer:
1634         device->volume_status = volume_status;
1635         device->volume_offline = volume_offline;
1636 }
1637
1638 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED   0x01
1639 #define PQI_DEVICE_PHY_MAP_SUPPORTED    0x10
1640 #define PQI_DEVICE_ERASE_IN_PROGRESS    0x10
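/* Note: the two 0x10 flags above test different fields (even_more_flags vs. extra_physical_drive_flags). */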
1641
1642 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
1643         struct pqi_scsi_dev *device,
1644         struct bmic_identify_physical_device *id_phys)
1645 {
1646         int rc;
1647
1648         memset(id_phys, 0, sizeof(*id_phys));
1649
1650         rc = pqi_identify_physical_device(ctrl_info, device,
1651                 id_phys, sizeof(*id_phys));
1652         if (rc) {
1653                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1654                 return rc;
1655         }
1656
1657         scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
1658         scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
1659
1660         memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
1661         memcpy(device->model, &id_phys->model[8], sizeof(device->model));
1662
1663         device->box_index = id_phys->box_index;
1664         device->phys_box_on_bus = id_phys->phys_box_on_bus;
1665         device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
1666         device->queue_depth =
1667                 get_unaligned_le16(&id_phys->current_queue_depth_limit);
1668         device->active_path_index = id_phys->active_path_number;
1669         device->path_map = id_phys->redundant_path_present_map;
1670         memcpy(&device->box,
1671                 &id_phys->alternate_paths_phys_box_on_port,
1672                 sizeof(device->box));
1673         memcpy(&device->phys_connector,
1674                 &id_phys->alternate_paths_phys_connector,
1675                 sizeof(device->phys_connector));
1676         device->bay = id_phys->phys_bay_in_box;
1677         device->lun_count = id_phys->multi_lun_device_lun_count;
1678         if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
1679                 id_phys->phy_count)
1680                 device->phy_id =
1681                         id_phys->phy_to_phy_map[device->active_path_index];
1682         else
1683                 device->phy_id = 0xFF;
1684
1685         device->ncq_prio_support =
1686                 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
1687                 PQI_DEVICE_NCQ_PRIO_SUPPORTED);
1688
1689         device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
1690
1691         return 0;
1692 }
1693
1694 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
1695         struct pqi_scsi_dev *device)
1696 {
1697         int rc;
1698         u8 *buffer;
1699
1700         buffer = kmalloc(64, GFP_KERNEL);
1701         if (!buffer)
1702                 return -ENOMEM;
1703
1704         /* Send an inquiry to the device to see what it is. */
1705         rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
1706         if (rc)
1707                 goto out;
1708
1709         scsi_sanitize_inquiry_string(&buffer[8], 8);
1710         scsi_sanitize_inquiry_string(&buffer[16], 16);
1711
1712         device->devtype = buffer[0] & 0x1f;
1713         memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
1714         memcpy(device->model, &buffer[16], sizeof(device->model));
1715
1716         if (device->devtype == TYPE_DISK) {
1717                 if (device->is_external_raid_device) {
1718                         device->raid_level = SA_RAID_UNKNOWN;
1719                         device->volume_status = CISS_LV_OK;
1720                         device->volume_offline = false;
1721                 } else {
1722                         pqi_get_raid_level(ctrl_info, device);
1723                         pqi_get_raid_bypass_status(ctrl_info, device);
1724                         pqi_get_volume_status(ctrl_info, device);
1725                 }
1726         }
1727
1728 out:
1729         kfree(buffer);
1730
1731         return rc;
1732 }
1733
1734 /*
1735  * Prevent adding drive to OS for some corner cases such as a drive
1736  * undergoing a sanitize (erase) operation. Some OSes will continue to poll
1737  * the drive until the sanitize completes, which can take hours,
1738  * resulting in long bootup delays. Commands such as TUR, READ_CAP
1739  * are allowed, but READ/WRITE cause check condition. So the OS
1740  * cannot check/read the partition table.
1741  * Note: devices that have completed sanitize must be re-enabled
1742  *       using the management utility.
1743  */
1744 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
1745 {
1746         return device->erase_in_progress;
1747 }
1748
1749 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
1750         struct pqi_scsi_dev *device,
1751         struct bmic_identify_physical_device *id_phys)
1752 {
1753         int rc;
1754
1755         if (device->is_expander_smp_device)
1756                 return 0;
1757
1758         if (pqi_is_logical_device(device))
1759                 rc = pqi_get_logical_device_info(ctrl_info, device);
1760         else
1761                 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
1762
1763         return rc;
1764 }
1765
1766 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
1767         struct pqi_scsi_dev *device,
1768         struct bmic_identify_physical_device *id_phys)
1769 {
1770         int rc;
1771
1772         rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);
1773
1774         if (rc == 0 && device->lun_count == 0)
1775                 device->lun_count = 1;
1776
1777         return rc;
1778 }
1779
1780 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
1781         struct pqi_scsi_dev *device)
1782 {
1783         char *status;
1784         static const char unknown_state_str[] =
1785                 "Volume is in an unknown state (%u)";
1786         char unknown_state_buffer[sizeof(unknown_state_str) + 10];
1787
1788         switch (device->volume_status) {
1789         case CISS_LV_OK:
1790                 status = "Volume online";
1791                 break;
1792         case CISS_LV_FAILED:
1793                 status = "Volume failed";
1794                 break;
1795         case CISS_LV_NOT_CONFIGURED:
1796                 status = "Volume not configured";
1797                 break;
1798         case CISS_LV_DEGRADED:
1799                 status = "Volume degraded";
1800                 break;
1801         case CISS_LV_READY_FOR_RECOVERY:
1802                 status = "Volume ready for recovery operation";
1803                 break;
1804         case CISS_LV_UNDERGOING_RECOVERY:
1805                 status = "Volume undergoing recovery";
1806                 break;
1807         case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1808                 status = "Wrong physical drive was replaced";
1809                 break;
1810         case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1811                 status = "A physical drive is not properly connected";
1812                 break;
1813         case CISS_LV_HARDWARE_OVERHEATING:
1814                 status = "Hardware is overheating";
1815                 break;
1816         case CISS_LV_HARDWARE_HAS_OVERHEATED:
1817                 status = "Hardware has overheated";
1818                 break;
1819         case CISS_LV_UNDERGOING_EXPANSION:
1820                 status = "Volume undergoing expansion";
1821                 break;
1822         case CISS_LV_NOT_AVAILABLE:
1823                 status = "Volume waiting for transforming volume to complete";
1824                 break;
1825         case CISS_LV_QUEUED_FOR_EXPANSION:
1826                 status = "Volume queued for expansion";
1827                 break;
1828         case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
1829                 status = "Volume disabled due to SCSI ID conflict";
1830                 break;
1831         case CISS_LV_EJECTED:
1832                 status = "Volume has been ejected";
1833                 break;
1834         case CISS_LV_UNDERGOING_ERASE:
1835                 status = "Volume undergoing background erase";
1836                 break;
1837         case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
1838                 status = "Volume ready for predictive spare rebuild";
1839                 break;
1840         case CISS_LV_UNDERGOING_RPI:
1841                 status = "Volume undergoing rapid parity initialization";
1842                 break;
1843         case CISS_LV_PENDING_RPI:
1844                 status = "Volume queued for rapid parity initialization";
1845                 break;
1846         case CISS_LV_ENCRYPTED_NO_KEY:
1847                 status = "Encrypted volume inaccessible - key not present";
1848                 break;
1849         case CISS_LV_UNDERGOING_ENCRYPTION:
1850                 status = "Volume undergoing encryption process";
1851                 break;
1852         case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
1853                 status = "Volume undergoing encryption re-keying process";
1854                 break;
1855         case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1856                 status = "Volume encrypted but encryption is disabled";
1857                 break;
1858         case CISS_LV_PENDING_ENCRYPTION:
1859                 status = "Volume pending migration to encrypted state";
1860                 break;
1861         case CISS_LV_PENDING_ENCRYPTION_REKEYING:
1862                 status = "Volume pending encryption rekeying";
1863                 break;
1864         case CISS_LV_NOT_SUPPORTED:
1865                 status = "Volume not supported on this controller";
1866                 break;
1867         case CISS_LV_STATUS_UNAVAILABLE:
1868                 status = "Volume status not available";
1869                 break;
1870         default:
1871                 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
1872                         unknown_state_str, device->volume_status);
1873                 status = unknown_state_buffer;
1874                 break;
1875         }
1876
1877         dev_info(&ctrl_info->pci_dev->dev,
1878                 "scsi %d:%d:%d:%d %s\n",
1879                 ctrl_info->scsi_host->host_no,
1880                 device->bus, device->target, device->lun, status);
1881 }
1882
1883 static void pqi_rescan_worker(struct work_struct *work)
1884 {
1885         struct pqi_ctrl_info *ctrl_info;
1886
1887         ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
1888                 rescan_work);
1889
1890         pqi_scan_scsi_devices(ctrl_info);
1891 }
1892
1893 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1894         struct pqi_scsi_dev *device)
1895 {
1896         int rc;
1897
1898         if (pqi_is_logical_device(device))
1899                 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1900                         device->target, device->lun);
1901         else
1902                 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1903
1904         return rc;
1905 }
1906
1907 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS      (20 * 1000)
1908
1909 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
1910 {
1911         int rc;
1912         int lun;
1913
1914         for (lun = 0; lun < device->lun_count; lun++) {
1915                 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
1916                         PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
1917                 if (rc)
1918                         dev_err(&ctrl_info->pci_dev->dev,
1919                                 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
1920                                 ctrl_info->scsi_host->host_no, device->bus,
1921                                 device->target, lun,
1922                                 atomic_read(&device->scsi_cmds_outstanding[lun]));
1923         }
1924
1925         if (pqi_is_logical_device(device))
1926                 scsi_remove_device(device->sdev);
1927         else
1928                 pqi_remove_sas_device(device);
1929
1930         pqi_device_remove_start(device);
1931 }
1932
1933 /* Assumes the SCSI device list lock is held. */
1934
1935 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1936         int bus, int target, int lun)
1937 {
1938         struct pqi_scsi_dev *device;
1939
1940         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
1941                 if (device->bus == bus && device->target == target && device->lun == lun)
1942                         return device;
1943
1944         return NULL;
1945 }
1946
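/*
 * Physical devices are matched by WWID; logical volumes are matched by
 * volume ID.
 */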
1947 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
1948 {
1949         if (dev1->is_physical_device != dev2->is_physical_device)
1950                 return false;
1951
1952         if (dev1->is_physical_device)
1953                 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;
1954
1955         return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
1956 }
1957
1958 enum pqi_find_result {
1959         DEVICE_NOT_FOUND,
1960         DEVICE_CHANGED,
1961         DEVICE_SAME,
1962 };
1963
1964 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1965         struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
1966 {
1967         struct pqi_scsi_dev *device;
1968
1969         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
1970                 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
1971                         *matching_device = device;
1972                         if (pqi_device_equal(device_to_find, device)) {
1973                                 if (device_to_find->volume_offline)
1974                                         return DEVICE_CHANGED;
1975                                 return DEVICE_SAME;
1976                         }
1977                         return DEVICE_CHANGED;
1978                 }
1979         }
1980
1981         return DEVICE_NOT_FOUND;
1982 }
1983
1984 static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
1985 {
1986         if (device->is_expander_smp_device)
1987                 return "Enclosure SMP    ";
1988
1989         return scsi_device_type(device->devtype);
1990 }
1991
1992 #define PQI_DEV_INFO_BUFFER_LENGTH      128
1993
1994 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
1995         char *action, struct pqi_scsi_dev *device)
1996 {
1997         ssize_t count;
1998         char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
1999
2000         count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
2001                 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
2002
2003         if (device->target_lun_valid)
2004                 count += scnprintf(buffer + count,
2005                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2006                         "%d:%d",
2007                         device->target,
2008                         device->lun);
2009         else
2010                 count += scnprintf(buffer + count,
2011                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2012                         "-:-");
2013
2014         if (pqi_is_logical_device(device))
2015                 count += scnprintf(buffer + count,
2016                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2017                         " %08x%08x",
2018                         *((u32 *)&device->scsi3addr),
2019                         *((u32 *)&device->scsi3addr[4]));
2020         else
2021                 count += scnprintf(buffer + count,
2022                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2023                         " %016llx%016llx",
2024                         get_unaligned_be64(&device->wwid[0]),
2025                         get_unaligned_be64(&device->wwid[8]));
2026
2027         count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
2028                 " %s %.8s %.16s ",
2029                 pqi_device_type(device),
2030                 device->vendor,
2031                 device->model);
2032
2033         if (pqi_is_logical_device(device)) {
2034                 if (device->devtype == TYPE_DISK)
2035                         count += scnprintf(buffer + count,
2036                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2037                                 "SSDSmartPathCap%c En%c %-12s",
2038                                 device->raid_bypass_configured ? '+' : '-',
2039                                 device->raid_bypass_enabled ? '+' : '-',
2040                                 pqi_raid_level_to_string(device->raid_level));
2041         } else {
2042                 count += scnprintf(buffer + count,
2043                         PQI_DEV_INFO_BUFFER_LENGTH - count,
2044                         "AIO%c", device->aio_enabled ? '+' : '-');
2045                 if (device->devtype == TYPE_DISK ||
2046                         device->devtype == TYPE_ZBC)
2047                         count += scnprintf(buffer + count,
2048                                 PQI_DEV_INFO_BUFFER_LENGTH - count,
2049                                 " qd=%-6d", device->queue_depth);
2050         }
2051
2052         dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
2053 }
2054
2055 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
2056 {
2057         u32 raid_map1_size;
2058         u32 raid_map2_size;
2059
2060         if (raid_map1 == NULL || raid_map2 == NULL)
2061                 return raid_map1 == raid_map2;
2062
2063         raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
2064         raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);
2065
2066         if (raid_map1_size != raid_map2_size)
2067                 return false;
2068
2069         return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
2070 }
2071
2072 /* Assumes the SCSI device list lock is held. */
2073
2074 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
2075         struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
2076 {
2077         existing_device->device_type = new_device->device_type;
2078         existing_device->bus = new_device->bus;
2079         if (new_device->target_lun_valid) {
2080                 existing_device->target = new_device->target;
2081                 existing_device->lun = new_device->lun;
2082                 existing_device->target_lun_valid = true;
2083         }
2084
2085         /* By definition, the scsi3addr and wwid fields are already the same. */
2086
2087         existing_device->is_physical_device = new_device->is_physical_device;
2088         memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
2089         memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
2090         existing_device->sas_address = new_device->sas_address;
2091         existing_device->queue_depth = new_device->queue_depth;
2092         existing_device->device_offline = false;
2093         existing_device->lun_count = new_device->lun_count;
2094
2095         if (pqi_is_logical_device(existing_device)) {
2096                 existing_device->is_external_raid_device = new_device->is_external_raid_device;
2097
2098                 if (existing_device->devtype == TYPE_DISK) {
2099                         existing_device->raid_level = new_device->raid_level;
2100                         existing_device->volume_status = new_device->volume_status;
2101                         memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
2102                         if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
2103                                 kfree(existing_device->raid_map);
2104                                 existing_device->raid_map = new_device->raid_map;
2105                                 /* To prevent this from being freed later. */
2106                                 new_device->raid_map = NULL;
2107                         }
2108                         if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
2109                                 existing_device->raid_io_stats = new_device->raid_io_stats;
2110                                 new_device->raid_io_stats = NULL;
2111                         }
2112                         existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
2113                         existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
2114                 }
2115         } else {
2116                 existing_device->aio_enabled = new_device->aio_enabled;
2117                 existing_device->aio_handle = new_device->aio_handle;
2118                 existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
2119                 existing_device->active_path_index = new_device->active_path_index;
2120                 existing_device->phy_id = new_device->phy_id;
2121                 existing_device->path_map = new_device->path_map;
2122                 existing_device->bay = new_device->bay;
2123                 existing_device->box_index = new_device->box_index;
2124                 existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
2125                 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
2126                 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
2127                 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
2128         }
2129 }
2130
2131 static inline void pqi_free_device(struct pqi_scsi_dev *device)
2132 {
2133         if (device) {
2134                 free_percpu(device->raid_io_stats);
2135                 kfree(device->raid_map);
2136                 kfree(device);
2137         }
2138 }
2139
2140 /*
2141  * Called when exposing a new device to the OS fails, in order to re-adjust
2142  * our internal SCSI device list to match the SCSI ML's view.
2143  */
2144
2145 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
2146         struct pqi_scsi_dev *device)
2147 {
2148         unsigned long flags;
2149
2150         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2151         list_del(&device->scsi_device_list_entry);
2152         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2153
2154         /* Allow the device structure to be freed later. */
2155         device->keep_device = false;
2156 }
2157
2158 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
2159 {
2160         if (device->is_expander_smp_device)
2161                 return device->sas_port != NULL;
2162
2163         return device->sdev != NULL;
2164 }
2165
2166 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
2167 {
2168         unsigned int lun;
2169         struct pqi_tmf_work *tmf_work;
2170
2171         for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
2172                 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
2173 }
2174
2175 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
2176 {
2177         if (pqi_device_in_remove(device))
2178                 return false;
2179
2180         if (device->sdev == NULL)
2181                 return false;
2182
2183         if (!scsi_device_online(device->sdev))
2184                 return false;
2185
2186         return device->rescan;
2187 }
2188
2189 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
2190         struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
2191 {
2192         int rc;
2193         unsigned int i;
2194         unsigned long flags;
2195         enum pqi_find_result find_result;
2196         struct pqi_scsi_dev *device;
2197         struct pqi_scsi_dev *next;
2198         struct pqi_scsi_dev *matching_device;
2199         LIST_HEAD(add_list);
2200         LIST_HEAD(delete_list);
2201
2202         /*
2203          * The idea here is to do as little work as possible while holding the
2204          * spinlock.  That's why we go to great pains to defer anything other
2205          * than updating the internal device list until after we release the
2206          * spinlock.
2207          */
2208
2209         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2210
2211         /* Assume that all devices in the existing list have gone away. */
2212         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
2213                 device->device_gone = true;
2214
2215         for (i = 0; i < num_new_devices; i++) {
2216                 device = new_device_list[i];
2217
2218                 find_result = pqi_scsi_find_entry(ctrl_info, device,
2219                         &matching_device);
2220
2221                 switch (find_result) {
2222                 case DEVICE_SAME:
2223                         /*
2224                          * The newly found device is already in the existing
2225                          * device list.
2226                          */
2227                         device->new_device = false;
2228                         matching_device->device_gone = false;
2229                         pqi_scsi_update_device(ctrl_info, matching_device, device);
2230                         break;
2231                 case DEVICE_NOT_FOUND:
2232                         /*
2233                          * The newly found device is NOT in the existing device
2234                          * list.
2235                          */
2236                         device->new_device = true;
2237                         break;
2238                 case DEVICE_CHANGED:
2239                         /*
2240                          * The original device has gone away and we need to add
2241                          * the new device.
2242                          */
2243                         device->new_device = true;
2244                         break;
2245                 }
2246         }
2247
2248         /* Process all devices that have gone away. */
2249         list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
2250                 scsi_device_list_entry) {
2251                 if (device->device_gone) {
2252                         list_del(&device->scsi_device_list_entry);
2253                         list_add_tail(&device->delete_list_entry, &delete_list);
2254                 }
2255         }
2256
2257         /* Process all new devices. */
2258         for (i = 0; i < num_new_devices; i++) {
2259                 device = new_device_list[i];
2260                 if (!device->new_device)
2261                         continue;
2262                 if (device->volume_offline)
2263                         continue;
2264                 list_add_tail(&device->scsi_device_list_entry,
2265                         &ctrl_info->scsi_device_list);
2266                 list_add_tail(&device->add_list_entry, &add_list);
2267                 /* To prevent this device structure from being freed later. */
2268                 device->keep_device = true;
2269                 pqi_init_device_tmf_work(device);
2270         }
2271
2272         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2273
2274         /*
2275          * If OFA is in progress and there are devices that need to be deleted,
2276          * allow any pending reset operations to continue and unblock any SCSI
2277          * requests before removal.
2278          */
2279         if (pqi_ofa_in_progress(ctrl_info)) {
2280                 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
2281                         if (pqi_is_device_added(device))
2282                                 pqi_device_remove_start(device);
2283                 pqi_ctrl_unblock_device_reset(ctrl_info);
2284                 pqi_scsi_unblock_requests(ctrl_info);
2285         }
2286
2287         /* Remove all devices that have gone away. */
2288         list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
2289                 if (device->volume_offline) {
2290                         pqi_dev_info(ctrl_info, "offline", device);
2291                         pqi_show_volume_status(ctrl_info, device);
2292                 } else {
2293                         pqi_dev_info(ctrl_info, "removed", device);
2294                 }
2295                 if (pqi_is_device_added(device))
2296                         pqi_remove_device(ctrl_info, device);
2297                 list_del(&device->delete_list_entry);
2298                 pqi_free_device(device);
2299         }
2300
2301         /*
2302          * Notify the SCSI ML of any existing device changes, such as
2303          * queue depth and device size.
2304          */
2305         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
2306                 /*
2307                  * Check for queue depth change.
2308                  */
2309                 if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
2310                         device->advertised_queue_depth = device->queue_depth;
2311                         scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
2312                 }
2313                 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
2314                 /*
2315                  * Check for changes in the device, such as size.
2316                  */
2317                 if (pqi_volume_rescan_needed(device)) {
2318                         device->rescan = false;
2319                         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2320                         scsi_rescan_device(device->sdev);
2321                 } else {
2322                         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
2323                 }
2324         }
2325
2326         /* Expose any new devices. */
2327         list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
2328                 if (!pqi_is_device_added(device)) {
2329                         rc = pqi_add_device(ctrl_info, device);
2330                         if (rc == 0) {
2331                                 pqi_dev_info(ctrl_info, "added", device);
2332                         } else {
2333                                 dev_warn(&ctrl_info->pci_dev->dev,
2334                                         "scsi %d:%d:%d:%d addition failed, device not added\n",
2335                                         ctrl_info->scsi_host->host_no,
2336                                         device->bus, device->target,
2337                                         device->lun);
2338                                 pqi_fixup_botched_add(ctrl_info, device);
2339                         }
2340                 }
2341         }
2342
2343 }
2344
2345 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
2346 {
2347         /*
2348          * Only support the HBA controller itself as a RAID
2349          * controller.  If it's a RAID controller other than
2350          * the HBA itself (an external RAID controller, for
2351          * example), we don't support it.
2352          */
2353         if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
2354                 !pqi_is_hba_lunid(device->scsi3addr))
2355                 return false;
2356
2357         return true;
2358 }
2359
2360 static inline bool pqi_skip_device(u8 *scsi3addr)
2361 {
2362         /* Ignore all masked devices. */
2363         if (MASKED_DEVICE(scsi3addr))
2364                 return true;
2365
2366         return false;
2367 }
2368
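/*
 * Setting the two high-order bits of byte 3 of the LUN address marks the
 * device as masked, so pqi_skip_device() will subsequently ignore it.
 */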
2369 static inline void pqi_mask_device(u8 *scsi3addr)
2370 {
2371         scsi3addr[3] |= 0xc0;
2372 }
2373
2374 static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
2375 {
2376         return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
2377 }
2378
2379 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2380 {
2381         int i;
2382         int rc;
2383         LIST_HEAD(new_device_list_head);
2384         struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
2385         struct report_log_lun_list *logdev_list = NULL;
2386         struct report_phys_lun_16byte_wwid *phys_lun;
2387         struct report_log_lun *log_lun;
2388         struct bmic_identify_physical_device *id_phys = NULL;
2389         u32 num_physicals;
2390         u32 num_logicals;
2391         struct pqi_scsi_dev **new_device_list = NULL;
2392         struct pqi_scsi_dev *device;
2393         struct pqi_scsi_dev *next;
2394         unsigned int num_new_devices;
2395         unsigned int num_valid_devices;
2396         bool is_physical_device;
2397         u8 *scsi3addr;
2398         unsigned int physical_index;
2399         unsigned int logical_index;
2400         static char *out_of_memory_msg =
2401                 "failed to allocate memory, device discovery stopped";
2402
2403         rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
2404         if (rc)
2405                 goto out;
2406
2407         if (physdev_list)
2408                 num_physicals =
2409                         get_unaligned_be32(&physdev_list->header.list_length)
2410                                 / sizeof(physdev_list->lun_entries[0]);
2411         else
2412                 num_physicals = 0;
2413
2414         if (logdev_list)
2415                 num_logicals =
2416                         get_unaligned_be32(&logdev_list->header.list_length)
2417                                 / sizeof(logdev_list->lun_entries[0]);
2418         else
2419                 num_logicals = 0;
2420
2421         if (num_physicals) {
2422                 /*
2423                  * We need this buffer for calls to pqi_get_device_info()
2424                  * below.  We allocate it here instead of inside
2425                  * pqi_get_physical_device_info() because it's a fairly large
2426                  * buffer.
2427                  */
2428                 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
2429                 if (!id_phys) {
2430                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2431                                 out_of_memory_msg);
2432                         rc = -ENOMEM;
2433                         goto out;
2434                 }
2435
2436                 if (pqi_hide_vsep) {
2437                         for (i = num_physicals - 1; i >= 0; i--) {
2438                                 phys_lun = &physdev_list->lun_entries[i];
2439                                 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
2440                                         pqi_mask_device(phys_lun->lunid);
2441                                         break;
2442                                 }
2443                         }
2444                 }
2445         }
2446
2447         if (num_logicals &&
2448                 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
2449                 ctrl_info->lv_drive_type_mix_valid = true;
2450
2451         num_new_devices = num_physicals + num_logicals;
2452
2453         new_device_list = kmalloc_array(num_new_devices,
2454                                         sizeof(*new_device_list),
2455                                         GFP_KERNEL);
2456         if (!new_device_list) {
2457                 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
2458                 rc = -ENOMEM;
2459                 goto out;
2460         }
2461
2462         for (i = 0; i < num_new_devices; i++) {
2463                 device = kzalloc(sizeof(*device), GFP_KERNEL);
2464                 if (!device) {
2465                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2466                                 out_of_memory_msg);
2467                         rc = -ENOMEM;
2468                         goto out;
2469                 }
2470                 list_add_tail(&device->new_device_list_entry,
2471                         &new_device_list_head);
2472         }
2473
2474         device = NULL;
2475         num_valid_devices = 0;
2476         physical_index = 0;
2477         logical_index = 0;
2478
2479         for (i = 0; i < num_new_devices; i++) {
2480
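                /*
                 * By default, physical LUN entries are consumed before
                 * logical ones; when pqi_expose_ld_first is set, logical
                 * entries come first.
                 */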
2481                 if ((!pqi_expose_ld_first && i < num_physicals) ||
2482                         (pqi_expose_ld_first && i >= num_logicals)) {
2483                         is_physical_device = true;
2484                         phys_lun = &physdev_list->lun_entries[physical_index++];
2485                         log_lun = NULL;
2486                         scsi3addr = phys_lun->lunid;
2487                 } else {
2488                         is_physical_device = false;
2489                         phys_lun = NULL;
2490                         log_lun = &logdev_list->lun_entries[logical_index++];
2491                         scsi3addr = log_lun->lunid;
2492                 }
2493
2494                 if (is_physical_device && pqi_skip_device(scsi3addr))
2495                         continue;
2496
2497                 if (device)
2498                         device = list_next_entry(device, new_device_list_entry);
2499                 else
2500                         device = list_first_entry(&new_device_list_head,
2501                                 struct pqi_scsi_dev, new_device_list_entry);
2502
2503                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
2504                 device->is_physical_device = is_physical_device;
2505                 if (is_physical_device) {
2506                         device->device_type = phys_lun->device_type;
2507                         if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
2508                                 device->is_expander_smp_device = true;
2509                 } else {
2510                         device->is_external_raid_device =
2511                                 pqi_is_external_raid_addr(scsi3addr);
2512                 }
2513
2514                 if (!pqi_is_supported_device(device))
2515                         continue;
2516
2517                 /* Gather information about the device. */
2518                 rc = pqi_get_device_info(ctrl_info, device, id_phys);
2519                 if (rc == -ENOMEM) {
2520                         dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
2521                                 out_of_memory_msg);
2522                         goto out;
2523                 }
2524                 if (rc) {
2525                         if (device->is_physical_device)
2526                                 dev_warn(&ctrl_info->pci_dev->dev,
2527                                         "obtaining device info failed, skipping physical device %016llx%016llx\n",
2528                                         get_unaligned_be64(&phys_lun->wwid[0]),
2529                                         get_unaligned_be64(&phys_lun->wwid[8]));
2530                         else
2531                                 dev_warn(&ctrl_info->pci_dev->dev,
2532                                         "obtaining device info failed, skipping logical device %08x%08x\n",
2533                                         *((u32 *)&device->scsi3addr),
2534                                         *((u32 *)&device->scsi3addr[4]));
2535                         rc = 0;
2536                         continue;
2537                 }
2538
2539                 /* Do not present disks that the OS cannot fully probe. */
2540                 if (pqi_keep_device_offline(device))
2541                         continue;
2542
2543                 pqi_assign_bus_target_lun(device);
2544
2545                 if (device->is_physical_device) {
2546                         memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
2547                         if ((phys_lun->device_flags &
2548                                 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
2549                                 phys_lun->aio_handle) {
2550                                 device->aio_enabled = true;
2551                                 device->aio_handle =
2552                                         phys_lun->aio_handle;
2553                         }
2554                 } else {
2555                         memcpy(device->volume_id, log_lun->volume_id,
2556                                 sizeof(device->volume_id));
2557                 }
2558
2559                 device->sas_address = get_unaligned_be64(&device->wwid[0]);
2560
2561                 new_device_list[num_valid_devices++] = device;
2562         }
2563
2564         pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
2565
2566 out:
2567         list_for_each_entry_safe(device, next, &new_device_list_head,
2568                 new_device_list_entry) {
2569                 if (device->keep_device)
2570                         continue;
2571                 list_del(&device->new_device_list_entry);
2572                 pqi_free_device(device);
2573         }
2574
2575         kfree(new_device_list);
2576         kfree(physdev_list);
2577         kfree(logdev_list);
2578         kfree(id_phys);
2579
2580         return rc;
2581 }
2582
2583 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
2584 {
2585         int rc;
2586         int mutex_acquired;
2587
2588         if (pqi_ctrl_offline(ctrl_info))
2589                 return -ENXIO;
2590
2591         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
2592
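        /*
         * Another scan already holds scan_mutex: give up if scanning is
         * blocked, otherwise reschedule the rescan worker and report that
         * a scan is already in progress.
         */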
2593         if (!mutex_acquired) {
2594                 if (pqi_ctrl_scan_blocked(ctrl_info))
2595                         return -EBUSY;
2596                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2597                 return -EINPROGRESS;
2598         }
2599
2600         rc = pqi_update_scsi_devices(ctrl_info);
2601         if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
2602                 pqi_schedule_rescan_worker_delayed(ctrl_info);
2603
2604         mutex_unlock(&ctrl_info->scan_mutex);
2605
2606         return rc;
2607 }
2608
2609 static void pqi_scan_start(struct Scsi_Host *shost)
2610 {
2611         struct pqi_ctrl_info *ctrl_info;
2612
2613         ctrl_info = shost_to_hba(shost);
2614
2615         pqi_scan_scsi_devices(ctrl_info);
2616 }
2617
2618 /* Returns TRUE if scan is finished. */
2619
2620 static int pqi_scan_finished(struct Scsi_Host *shost,
2621         unsigned long elapsed_time)
2622 {
2623         struct pqi_ctrl_info *ctrl_info;
2624
2625         ctrl_info = shost_priv(shost);
2626
2627         return !mutex_is_locked(&ctrl_info->scan_mutex);
2628 }
2629
2630 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
2631         struct raid_map *raid_map, u64 first_block)
2632 {
2633         u32 volume_blk_size;
2634
2635         /*
2636          * Set the encryption tweak values based on logical block address.
2637          * If the block size is 512, the tweak value is equal to the LBA.
2638          * For other block sizes, tweak value is (LBA * block size) / 512.
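         *
         * For example, with 4096-byte blocks, LBA 1000 yields a tweak of
         * (1000 * 4096) / 512 = 8000.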
2639          */
2640         volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2641         if (volume_blk_size != 512)
2642                 first_block = (first_block * volume_blk_size) / 512;
2643
2644         encryption_info->data_encryption_key_index =
2645                 get_unaligned_le16(&raid_map->data_encryption_key_index);
2646         encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2647         encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2648 }
2649
2650 /*
2651  * Attempt to perform RAID bypass mapping for a logical volume I/O.
2652  */
2653
2654 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
2655         struct pqi_scsi_dev_raid_map_data *rmd)
2656 {
2657         bool is_supported = true;
2658
2659         switch (rmd->raid_level) {
2660         case SA_RAID_0:
2661                 break;
2662         case SA_RAID_1:
2663                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2664                         rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2665                         is_supported = false;
2666                 break;
2667         case SA_RAID_TRIPLE:
2668                 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2669                         rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2670                         is_supported = false;
2671                 break;
2672         case SA_RAID_5:
2673                 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2674                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2675                         is_supported = false;
2676                 break;
2677         case SA_RAID_6:
2678                 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2679                         rmd->data_length > ctrl_info->max_write_raid_5_6))
2680                         is_supported = false;
2681                 break;
2682         default:
2683                 is_supported = false;
2684                 break;
2685         }
2686
2687         return is_supported;
2688 }
2689
2690 #define PQI_RAID_BYPASS_INELIGIBLE      1
2691
2692 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
2693         struct pqi_scsi_dev_raid_map_data *rmd)
2694 {
2695         /* Check for valid opcode, get LBA and block count. */
2696         switch (scmd->cmnd[0]) {
2697         case WRITE_6:
2698                 rmd->is_write = true;
2699                 fallthrough;
2700         case READ_6:
2701                 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2702                         (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
2703                 rmd->block_cnt = (u32)scmd->cmnd[4];
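                /* Per SBC, a 6-byte CDB transfer length of 0 means 256 blocks. */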
2704                 if (rmd->block_cnt == 0)
2705                         rmd->block_cnt = 256;
2706                 break;
2707         case WRITE_10:
2708                 rmd->is_write = true;
2709                 fallthrough;
2710         case READ_10:
2711                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2712                 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2713                 break;
2714         case WRITE_12:
2715                 rmd->is_write = true;
2716                 fallthrough;
2717         case READ_12:
2718                 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2719                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2720                 break;
2721         case WRITE_16:
2722                 rmd->is_write = true;
2723                 fallthrough;
2724         case READ_16:
2725                 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2726                 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2727                 break;
2728         default:
2729                 /* Process via normal I/O path. */
2730                 return PQI_RAID_BYPASS_INELIGIBLE;
2731         }
2732
2733         put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
2734
2735         return 0;
2736 }
2737
2738 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
2739         struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2740 {
2741 #if BITS_PER_LONG == 32
2742         u64 tmpdiv;
2743 #endif
2744
2745         rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2746
2747         /* Check for invalid block or wraparound. */
2748         if (rmd->last_block >=
2749                 get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2750                 rmd->last_block < rmd->first_block)
2751                 return PQI_RAID_BYPASS_INELIGIBLE;
2752
2753         rmd->data_disks_per_row =
2754                 get_unaligned_le16(&raid_map->data_disks_per_row);
2755         rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2756         rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2757
2758         /* Calculate stripe information for the request. */
2759         rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2760         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2761                 return PQI_RAID_BYPASS_INELIGIBLE;
2762 #if BITS_PER_LONG == 32
2763         tmpdiv = rmd->first_block;
2764         do_div(tmpdiv, rmd->blocks_per_row);
2765         rmd->first_row = tmpdiv;
2766         tmpdiv = rmd->last_block;
2767         do_div(tmpdiv, rmd->blocks_per_row);
2768         rmd->last_row = tmpdiv;
2769         rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2770         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2771         tmpdiv = rmd->first_row_offset;
2772         do_div(tmpdiv, rmd->strip_size);
2773         rmd->first_column = tmpdiv;
2774         tmpdiv = rmd->last_row_offset;
2775         do_div(tmpdiv, rmd->strip_size);
2776         rmd->last_column = tmpdiv;
2777 #else
2778         rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2779         rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2780         rmd->first_row_offset = (u32)(rmd->first_block -
2781                 (rmd->first_row * rmd->blocks_per_row));
2782         rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2783                 rmd->blocks_per_row));
2784         rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2785         rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2786 #endif
2787
2788         /* If this isn't a single row/column then give to the controller. */
2789         if (rmd->first_row != rmd->last_row ||
2790                 rmd->first_column != rmd->last_column)
2791                 return PQI_RAID_BYPASS_INELIGIBLE;
2792
2793         /* Proceeding with driver mapping. */
2794         rmd->total_disks_per_row = rmd->data_disks_per_row +
2795                 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2796         rmd->map_row = ((u32)(rmd->first_row >>
2797                 raid_map->parity_rotation_shift)) %
2798                 get_unaligned_le16(&raid_map->row_cnt);
2799         rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2800                 rmd->first_column;
2801
2802         return 0;
2803 }
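
/*
 * Worked example of the row/column math above (hypothetical values):
 * with strip_size = 128 and data_disks_per_row = 4, blocks_per_row = 512,
 * so an I/O starting at block 1000 maps to first_row = 1000 / 512 = 1,
 * first_row_offset = 1000 - 512 = 488, and first_column = 488 / 128 = 3,
 * i.e. the request starts on the fourth data drive of the second row.
 */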
2804
2805 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2806         struct raid_map *raid_map)
2807 {
2808 #if BITS_PER_LONG == 32
2809         u64 tmpdiv;
2810 #endif
2811
2812         if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2813                 return PQI_RAID_BYPASS_INELIGIBLE;
2814
2815         /* RAID 50/60 */
2816         /* Verify first and last block are in same RAID group. */
2817         rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
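        /*
         * Hypothetical example: with blocks_per_row = 512 and
         * layout_map_count = 2, stripesize = 1024, so block 1000 falls in
         * parity group (1000 % 1024) / 512 = 1.
         */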
2818 #if BITS_PER_LONG == 32
2819         tmpdiv = rmd->first_block;
2820         rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2821         tmpdiv = rmd->first_group;
2822         do_div(tmpdiv, rmd->blocks_per_row);
2823         rmd->first_group = tmpdiv;
2824         tmpdiv = rmd->last_block;
2825         rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2826         tmpdiv = rmd->last_group;
2827         do_div(tmpdiv, rmd->blocks_per_row);
2828         rmd->last_group = tmpdiv;
2829 #else
2830         rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2831         rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2832 #endif
2833         if (rmd->first_group != rmd->last_group)
2834                 return PQI_RAID_BYPASS_INELIGIBLE;
2835
2836         /* Verify request is in a single row of RAID 5/6. */
2837 #if BITS_PER_LONG == 32
2838         tmpdiv = rmd->first_block;
2839         do_div(tmpdiv, rmd->stripesize);
2840         rmd->first_row = tmpdiv;
2841         rmd->r5or6_first_row = tmpdiv;
2842         tmpdiv = rmd->last_block;
2843         do_div(tmpdiv, rmd->stripesize);
2844         rmd->r5or6_last_row = tmpdiv;
2845 #else
2846         rmd->first_row = rmd->r5or6_first_row =
2847                 rmd->first_block / rmd->stripesize;
2848         rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2849 #endif
2850         if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2851                 return PQI_RAID_BYPASS_INELIGIBLE;
2852
2853         /* Verify request is in a single column. */
2854 #if BITS_PER_LONG == 32
2855         tmpdiv = rmd->first_block;
2856         rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2857         tmpdiv = rmd->first_row_offset;
2858         rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2859         rmd->r5or6_first_row_offset = rmd->first_row_offset;
2860         tmpdiv = rmd->last_block;
2861         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2862         tmpdiv = rmd->r5or6_last_row_offset;
2863         rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2864         tmpdiv = rmd->r5or6_first_row_offset;
2865         do_div(tmpdiv, rmd->strip_size);
2866         rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2867         tmpdiv = rmd->r5or6_last_row_offset;
2868         do_div(tmpdiv, rmd->strip_size);
2869         rmd->r5or6_last_column = tmpdiv;
2870 #else
2871         rmd->first_row_offset = rmd->r5or6_first_row_offset =
2872                 (u32)((rmd->first_block % rmd->stripesize) %
2873                 rmd->blocks_per_row);
2874
2875         rmd->r5or6_last_row_offset =
2876                 (u32)((rmd->last_block % rmd->stripesize) %
2877                 rmd->blocks_per_row);
2878
2879         rmd->first_column =
2880                 rmd->r5or6_first_row_offset / rmd->strip_size;
2881         rmd->r5or6_first_column = rmd->first_column;
2882         rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2883 #endif
2884         if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2885                 return PQI_RAID_BYPASS_INELIGIBLE;
2886
2887         /* Request is eligible. */
2888         rmd->map_row =
2889                 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2890                 get_unaligned_le16(&raid_map->row_cnt);
2891
2892         rmd->map_index = (rmd->first_group *
2893                 (get_unaligned_le16(&raid_map->row_cnt) *
2894                 rmd->total_disks_per_row)) +
2895                 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2896
2897         if (rmd->is_write) {
2898                 u32 index;
2899
2900                 /*
2901                  * p_parity_it_nexus and q_parity_it_nexus are pointers to the
2902                  * parity entries inside the device's raid_map.
2903                  *
2904                  * A device's RAID map is bounded by the number of RAID disks squared.
2905                  *
2906                  * The device's RAID map size is checked during device
2907                  * initialization.
2908                  */
2909                 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2910                 index *= rmd->total_disks_per_row;
2911                 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
2912
2913                 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2914                 if (rmd->raid_level == SA_RAID_6) {
2915                         rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2916                         rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2917                 }
2918 #if BITS_PER_LONG == 32
2919                 tmpdiv = rmd->first_block;
2920                 do_div(tmpdiv, rmd->blocks_per_row);
2921                 rmd->row = tmpdiv;
2922 #else
2923                 rmd->row = rmd->first_block / rmd->blocks_per_row;
2924 #endif
2925         }
2926
2927         return 0;
2928 }
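
/*
 * Illustrative parity-index example for the write path above (all
 * values hypothetical): with total_disks_per_row = 4, a map_index of
 * 5 sits in the second row of the map (entries 4-7), and the parity
 * entries are the last metadata_disks_per_row entries of that row.
 * With one metadata disk per row (RAID 5):
 *
 *   index = DIV_ROUND_UP(5 + 1, 4) * 4 - 1 = 8 - 1 = 7   (P parity)
 *
 * With two metadata disks per row (RAID 6), index = 8 - 2 = 6 for P
 * parity and the Q parity handle follows at index + 1 = 7.
 */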
2929
2930 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2931 {
2932         /* Build the new CDB for the physical disk I/O. */
2933         if (rmd->disk_block > 0xffffffff) {
2934                 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2935                 rmd->cdb[1] = 0;
2936                 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2937                 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2938                 rmd->cdb[14] = 0;
2939                 rmd->cdb[15] = 0;
2940                 rmd->cdb_length = 16;
2941         } else {
2942                 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2943                 rmd->cdb[1] = 0;
2944                 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2945                 rmd->cdb[6] = 0;
2946                 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2947                 rmd->cdb[9] = 0;
2948                 rmd->cdb_length = 10;
2949         }
2950 }
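
/*
 * Example of a resulting CDB (hypothetical values): a read of 8
 * blocks at disk_block 0x12345678 fits in 32 bits, so the 10-byte
 * form is built:
 *
 *   cdb[] = { 0x28, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x08, 0x00 }
 *            READ_10      big-endian 32-bit LBA         be16 count
 *
 * A disk_block above 0xffffffff forces the 16-byte READ_16/WRITE_16
 * form instead, which carries a 64-bit LBA and a 32-bit block count.
 */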
2951
2952 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
2953         struct pqi_scsi_dev_raid_map_data *rmd)
2954 {
2955         u32 index;
2956         u32 group;
2957
2958         group = rmd->map_index / rmd->data_disks_per_row;
2959
2960         index = rmd->map_index - (group * rmd->data_disks_per_row);
2961         rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2962         index += rmd->data_disks_per_row;
2963         rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2964         if (rmd->layout_map_count > 2) {
2965                 index += rmd->data_disks_per_row;
2966                 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2967         }
2968
2969         rmd->num_it_nexus_entries = rmd->layout_map_count;
2970 }
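
/*
 * Illustrative RAID 1 nexus example (hypothetical map): for a
 * three-way mirror (SA_RAID_TRIPLE, layout_map_count = 3) with
 * data_disks_per_row = 1 and map_index = 0, group is 0 and the three
 * IT nexus handles come from disk_data[0], disk_data[1] and
 * disk_data[2]: one copy of the data per mirror leg, each one row
 * further into the map.
 */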
2971
2972 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2973         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2974         struct pqi_queue_group *queue_group)
2975 {
2976         int rc;
2977         struct raid_map *raid_map;
2978         u32 group;
2979         u32 next_bypass_group;
2980         struct pqi_encryption_info *encryption_info_ptr;
2981         struct pqi_encryption_info encryption_info;
2982         struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2983
2984         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2985         if (rc)
2986                 return PQI_RAID_BYPASS_INELIGIBLE;
2987
2988         rmd.raid_level = device->raid_level;
2989
2990         if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2991                 return PQI_RAID_BYPASS_INELIGIBLE;
2992
2993         if (unlikely(rmd.block_cnt == 0))
2994                 return PQI_RAID_BYPASS_INELIGIBLE;
2995
2996         raid_map = device->raid_map;
2997
2998         rc = pqi_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2999         if (rc)
3000                 return PQI_RAID_BYPASS_INELIGIBLE;
3001
3002         if (device->raid_level == SA_RAID_1 ||
3003                 device->raid_level == SA_RAID_TRIPLE) {
3004                 if (rmd.is_write) {
3005                         pqi_calc_aio_r1_nexus(raid_map, &rmd);
3006                 } else {
3007                         group = device->next_bypass_group[rmd.map_index];
3008                         next_bypass_group = group + 1;
3009                         if (next_bypass_group >= rmd.layout_map_count)
3010                                 next_bypass_group = 0;
3011                         device->next_bypass_group[rmd.map_index] = next_bypass_group;
3012                         rmd.map_index += group * rmd.data_disks_per_row;
3013                 }
3014         } else if ((device->raid_level == SA_RAID_5 ||
3015                 device->raid_level == SA_RAID_6) &&
3016                 (rmd.layout_map_count > 1 || rmd.is_write)) {
3017                 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
3018                 if (rc)
3019                         return PQI_RAID_BYPASS_INELIGIBLE;
3020         }
3021
3022         if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3023                 return PQI_RAID_BYPASS_INELIGIBLE;
3024
3025         rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3026         rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3027                 rmd.first_row * rmd.strip_size +
3028                 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3029         rmd.disk_block_cnt = rmd.block_cnt;
3030
3031         /* Handle differing logical/physical block sizes. */
3032         if (raid_map->phys_blk_shift) {
3033                 rmd.disk_block <<= raid_map->phys_blk_shift;
3034                 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3035         }
3036
3037         if (unlikely(rmd.disk_block_cnt > 0xffff))
3038                 return PQI_RAID_BYPASS_INELIGIBLE;
3039
3040         pqi_set_aio_cdb(&rmd);
3041
3042         if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
3043                 if (rmd.data_length > device->max_transfer_encrypted)
3044                         return PQI_RAID_BYPASS_INELIGIBLE;
3045                 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3046                 encryption_info_ptr = &encryption_info;
3047         } else {
3048                 encryption_info_ptr = NULL;
3049         }
3050
3051         if (rmd.is_write) {
3052                 switch (device->raid_level) {
3053                 case SA_RAID_1:
3054                 case SA_RAID_TRIPLE:
3055                         return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
3056                                 encryption_info_ptr, device, &rmd);
3057                 case SA_RAID_5:
3058                 case SA_RAID_6:
3059                         return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
3060                                 encryption_info_ptr, device, &rmd);
3061                 }
3062         }
3063
3064         return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3065                 rmd.cdb, rmd.cdb_length, queue_group,
3066                 encryption_info_ptr, true, false);
3067 }
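
/*
 * Bypass summary (descriptive, not normative): a request goes
 * straight to a physical drive only when every gate above passes: a
 * supported RAID level, a non-zero block count, a single row/column
 * (and, for RAID 5/6, a single group), an in-range map index, a disk
 * block count that fits the CDB, and, when encryption is enabled, a
 * transfer no larger than max_transfer_encrypted.  Any failed check
 * returns PQI_RAID_BYPASS_INELIGIBLE and the caller falls back to
 * the normal RAID path.
 */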
3068
3069 #define PQI_STATUS_IDLE         0x0
3070
3071 #define PQI_CREATE_ADMIN_QUEUE_PAIR     1
3072 #define PQI_DELETE_ADMIN_QUEUE_PAIR     2
3073
3074 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET             0x0
3075 #define PQI_DEVICE_STATE_STATUS_AVAILABLE               0x1
3076 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY            0x2
3077 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY         0x3
3078 #define PQI_DEVICE_STATE_ERROR                          0x4
3079
3080 #define PQI_MODE_READY_TIMEOUT_SECS             30
3081 #define PQI_MODE_READY_POLL_INTERVAL_MSECS      1
3082
3083 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
3084 {
3085         struct pqi_device_registers __iomem *pqi_registers;
3086         unsigned long timeout;
3087         u64 signature;
3088         u8 status;
3089
3090         pqi_registers = ctrl_info->pqi_registers;
3091         timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
3092
3093         while (1) {
3094                 signature = readq(&pqi_registers->signature);
3095                 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
3096                         sizeof(signature)) == 0)
3097                         break;
3098                 if (time_after(jiffies, timeout)) {
3099                         dev_err(&ctrl_info->pci_dev->dev,
3100                                 "timed out waiting for PQI signature\n");
3101                         return -ETIMEDOUT;
3102                 }
3103                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3104         }
3105
3106         while (1) {
3107                 status = readb(&pqi_registers->function_and_status_code);
3108                 if (status == PQI_STATUS_IDLE)
3109                         break;
3110                 if (time_after(jiffies, timeout)) {
3111                         dev_err(&ctrl_info->pci_dev->dev,
3112                                 "timed out waiting for PQI IDLE\n");
3113                         return -ETIMEDOUT;
3114                 }
3115                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3116         }
3117
3118         while (1) {
3119                 if (readl(&pqi_registers->device_status) ==
3120                         PQI_DEVICE_STATE_ALL_REGISTERS_READY)
3121                         break;
3122                 if (time_after(jiffies, timeout)) {
3123                         dev_err(&ctrl_info->pci_dev->dev,
3124                                 "timed out waiting for PQI all registers ready\n");
3125                         return -ETIMEDOUT;
3126                 }
3127                 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
3128         }
3129
3130         return 0;
3131 }
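
/*
 * The three polling loops above walk the PQI device-state ladder in
 * order: a valid signature first, then a function-and-status code of
 * IDLE, then a device status of "all registers ready", matching the
 * PQI_DEVICE_STATE_* progression defined above.  Note that all three
 * stages share a single 30-second deadline computed once, not 30
 * seconds per stage.
 */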
3132
3133 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
3134 {
3135         struct pqi_scsi_dev *device;
3136
3137         device = io_request->scmd->device->hostdata;
3138         device->raid_bypass_enabled = false;
3139         device->aio_enabled = false;
3140 }
3141
3142 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
3143 {
3144         struct pqi_ctrl_info *ctrl_info;
3145         struct pqi_scsi_dev *device;
3146
3147         device = sdev->hostdata;
3148         if (device->device_offline)
3149                 return;
3150
3151         device->device_offline = true;
3152         ctrl_info = shost_to_hba(sdev->host);
3153         pqi_schedule_rescan_worker(ctrl_info);
3154         dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
3155                 path, ctrl_info->scsi_host->host_no, device->bus,
3156                 device->target, device->lun);
3157 }
3158
3159 static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
3160 {
3161         u8 scsi_status;
3162         u8 host_byte;
3163         struct scsi_cmnd *scmd;
3164         struct pqi_raid_error_info *error_info;
3165         size_t sense_data_length;
3166         int residual_count;
3167         int xfer_count;
3168         struct scsi_sense_hdr sshdr;
3169
3170         scmd = io_request->scmd;
3171         if (!scmd)
3172                 return;
3173
3174         error_info = io_request->error_info;
3175         scsi_status = error_info->status;
3176         host_byte = DID_OK;
3177
3178         switch (error_info->data_out_result) {
3179         case PQI_DATA_IN_OUT_GOOD:
3180                 break;
3181         case PQI_DATA_IN_OUT_UNDERFLOW:
3182                 xfer_count =
3183                         get_unaligned_le32(&error_info->data_out_transferred);
3184                 residual_count = scsi_bufflen(scmd) - xfer_count;
3185                 scsi_set_resid(scmd, residual_count);
3186                 if (xfer_count < scmd->underflow)
3187                         host_byte = DID_SOFT_ERROR;
3188                 break;
3189         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
3190         case PQI_DATA_IN_OUT_ABORTED:
3191                 host_byte = DID_ABORT;
3192                 break;
3193         case PQI_DATA_IN_OUT_TIMEOUT:
3194                 host_byte = DID_TIME_OUT;
3195                 break;
3196         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
3197         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
3198         case PQI_DATA_IN_OUT_BUFFER_ERROR:
3199         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
3200         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
3201         case PQI_DATA_IN_OUT_ERROR:
3202         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
3203         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
3204         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
3205         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
3206         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
3207         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
3208         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
3209         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
3210         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
3211         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
3212         default:
3213                 host_byte = DID_ERROR;
3214                 break;
3215         }
3216
3217         sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
3218         if (sense_data_length == 0)
3219                 sense_data_length =
3220                         get_unaligned_le16(&error_info->response_data_length);
3221         if (sense_data_length) {
3222                 if (sense_data_length > sizeof(error_info->data))
3223                         sense_data_length = sizeof(error_info->data);
3224
3225                 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3226                         scsi_normalize_sense(error_info->data,
3227                                 sense_data_length, &sshdr) &&
3228                                 sshdr.sense_key == HARDWARE_ERROR &&
3229                                 sshdr.asc == 0x3e) {
3230                         struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
3231                         struct pqi_scsi_dev *device = scmd->device->hostdata;
3232
3233                         switch (sshdr.ascq) {
3234                         case 0x1: /* LOGICAL UNIT FAILURE */
3235                                 if (printk_ratelimit())
3236                                         scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
3237                                                 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3238                                 pqi_take_device_offline(scmd->device, "RAID");
3239                                 host_byte = DID_NO_CONNECT;
3240                                 break;
3241
3242                         default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
3243                                 if (printk_ratelimit())
3244                                         scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
3245                                                 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
3246                                 break;
3247                         }
3248                 }
3249
3250                 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3251                         sense_data_length = SCSI_SENSE_BUFFERSIZE;
3252                 memcpy(scmd->sense_buffer, error_info->data,
3253                         sense_data_length);
3254         }
3255
3256         if (pqi_cmd_priv(scmd)->this_residual &&
3257             !pqi_is_logical_device(scmd->device->hostdata) &&
3258             scsi_status == SAM_STAT_CHECK_CONDITION &&
3259             host_byte == DID_OK &&
3260             sense_data_length &&
3261             scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
3262             sshdr.sense_key == ILLEGAL_REQUEST &&
3263             sshdr.asc == 0x26 &&
3264             sshdr.ascq == 0x0) {
3265                 host_byte = DID_NO_CONNECT;
3266                 pqi_take_device_offline(scmd->device, "AIO");
3267                 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
3268         }
3269
3270         scmd->result = scsi_status;
3271         set_host_byte(scmd, host_byte);
3272 }
3273
3274 static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
3275 {
3276         u8 scsi_status;
3277         u8 host_byte;
3278         struct scsi_cmnd *scmd;
3279         struct pqi_aio_error_info *error_info;
3280         size_t sense_data_length;
3281         int residual_count;
3282         int xfer_count;
3283         bool device_offline;
3284
3285         scmd = io_request->scmd;
3286         error_info = io_request->error_info;
3287         host_byte = DID_OK;
3288         sense_data_length = 0;
3289         device_offline = false;
3290
3291         switch (error_info->service_response) {
3292         case PQI_AIO_SERV_RESPONSE_COMPLETE:
3293                 scsi_status = error_info->status;
3294                 break;
3295         case PQI_AIO_SERV_RESPONSE_FAILURE:
3296                 switch (error_info->status) {
3297                 case PQI_AIO_STATUS_IO_ABORTED:
3298                         scsi_status = SAM_STAT_TASK_ABORTED;
3299                         break;
3300                 case PQI_AIO_STATUS_UNDERRUN:
3301                         scsi_status = SAM_STAT_GOOD;
3302                         residual_count = get_unaligned_le32(
3303                                                 &error_info->residual_count);
3304                         scsi_set_resid(scmd, residual_count);
3305                         xfer_count = scsi_bufflen(scmd) - residual_count;
3306                         if (xfer_count < scmd->underflow)
3307                                 host_byte = DID_SOFT_ERROR;
3308                         break;
3309                 case PQI_AIO_STATUS_OVERRUN:
3310                         scsi_status = SAM_STAT_GOOD;
3311                         break;
3312                 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
3313                         pqi_aio_path_disabled(io_request);
3314                         scsi_status = SAM_STAT_GOOD;
3315                         io_request->status = -EAGAIN;
3316                         break;
3317                 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
3318                 case PQI_AIO_STATUS_INVALID_DEVICE:
3319                         if (!io_request->raid_bypass) {
3320                                 device_offline = true;
3321                                 pqi_take_device_offline(scmd->device, "AIO");
3322                                 host_byte = DID_NO_CONNECT;
3323                         }
3324                         scsi_status = SAM_STAT_CHECK_CONDITION;
3325                         break;
3326                 case PQI_AIO_STATUS_IO_ERROR:
3327                 default:
3328                         scsi_status = SAM_STAT_CHECK_CONDITION;
3329                         break;
3330                 }
3331                 break;
3332         case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
3333         case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
3334                 scsi_status = SAM_STAT_GOOD;
3335                 break;
3336         case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
3337         case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
3338         default:
3339                 scsi_status = SAM_STAT_CHECK_CONDITION;
3340                 break;
3341         }
3342
3343         if (error_info->data_present) {
3344                 sense_data_length =
3345                         get_unaligned_le16(&error_info->data_length);
3346                 if (sense_data_length) {
3347                         if (sense_data_length > sizeof(error_info->data))
3348                                 sense_data_length = sizeof(error_info->data);
3349                         if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
3350                                 sense_data_length = SCSI_SENSE_BUFFERSIZE;
3351                         memcpy(scmd->sense_buffer, error_info->data,
3352                                 sense_data_length);
3353                 }
3354         }
3355
3356         if (device_offline && sense_data_length == 0)
3357                 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);
3358
3359         scmd->result = scsi_status;
3360         set_host_byte(scmd, host_byte);
3361 }
3362
3363 static void pqi_process_io_error(unsigned int iu_type,
3364         struct pqi_io_request *io_request)
3365 {
3366         switch (iu_type) {
3367         case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3368                 pqi_process_raid_io_error(io_request);
3369                 break;
3370         case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3371                 pqi_process_aio_io_error(io_request);
3372                 break;
3373         }
3374 }
3375
3376 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
3377         struct pqi_task_management_response *response)
3378 {
3379         int rc;
3380
3381         switch (response->response_code) {
3382         case SOP_TMF_COMPLETE:
3383         case SOP_TMF_FUNCTION_SUCCEEDED:
3384                 rc = 0;
3385                 break;
3386         case SOP_TMF_REJECTED:
3387                 rc = -EAGAIN;
3388                 break;
3389         case SOP_TMF_INCORRECT_LOGICAL_UNIT:
3390                 rc = -ENODEV;
3391                 break;
3392         default:
3393                 rc = -EIO;
3394                 break;
3395         }
3396
3397         if (rc)
3398                 dev_err(&ctrl_info->pci_dev->dev,
3399                         "Task Management Function error: %d (response code: %u)\n", rc, response->response_code);
3400
3401         return rc;
3402 }
3403
3404 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
3405         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
3406 {
3407         pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
3408 }
3409
3410 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
3411 {
3412         int num_responses;
3413         pqi_index_t oq_pi;
3414         pqi_index_t oq_ci;
3415         struct pqi_io_request *io_request;
3416         struct pqi_io_response *response;
3417         u16 request_id;
3418
3419         num_responses = 0;
3420         oq_ci = queue_group->oq_ci_copy;
3421
3422         while (1) {
3423                 oq_pi = readl(queue_group->oq_pi);
3424                 if (oq_pi >= ctrl_info->num_elements_per_oq) {
3425                         pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
3426                         dev_err(&ctrl_info->pci_dev->dev,
3427                                 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3428                                 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
3429                         return -1;
3430                 }
3431                 if (oq_pi == oq_ci)
3432                         break;
3433
3434                 num_responses++;
3435                 response = queue_group->oq_element_array +
3436                         (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3437
3438                 request_id = get_unaligned_le16(&response->request_id);
3439                 if (request_id >= ctrl_info->max_io_slots) {
3440                         pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
3441                         dev_err(&ctrl_info->pci_dev->dev,
3442                                 "request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
3443                                 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
3444                         return -1;
3445                 }
3446
3447                 io_request = &ctrl_info->io_request_pool[request_id];
3448                 if (atomic_read(&io_request->refcount) == 0) {
3449                         pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
3450                         dev_err(&ctrl_info->pci_dev->dev,
3451                                 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
3452                                 request_id, oq_pi, oq_ci);
3453                         return -1;
3454                 }
3455
3456                 switch (response->header.iu_type) {
3457                 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
3458                 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
3459                         if (io_request->scmd)
3460                                 io_request->scmd->result = 0;
3461                         fallthrough;
3462                 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
3463                         break;
3464                 case PQI_RESPONSE_IU_VENDOR_GENERAL:
3465                         io_request->status =
3466                                 get_unaligned_le16(
3467                                 &((struct pqi_vendor_general_response *)response)->status);
3468                         break;
3469                 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
3470                         io_request->status = pqi_interpret_task_management_response(ctrl_info,
3471                                 (void *)response);
3472                         break;
3473                 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
3474                         pqi_aio_path_disabled(io_request);
3475                         io_request->status = -EAGAIN;
3476                         break;
3477                 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
3478                 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
3479                         io_request->error_info = ctrl_info->error_buffer +
3480                                 (get_unaligned_le16(&response->error_index) *
3481                                 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
3482                         pqi_process_io_error(response->header.iu_type, io_request);
3483                         break;
3484                 default:
3485                         pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
3486                         dev_err(&ctrl_info->pci_dev->dev,
3487                                 "unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
3488                                 response->header.iu_type, oq_pi, oq_ci);
3489                         return -1;
3490                 }
3491
3492                 io_request->io_complete_callback(io_request, io_request->context);
3493
3494                 /*
3495                  * Note that the I/O request structure CANNOT BE TOUCHED after
3496                  * returning from the I/O completion callback!
3497                  */
3498                 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
3499         }
3500
3501         if (num_responses) {
3502                 queue_group->oq_ci_copy = oq_ci;
3503                 writel(oq_ci, queue_group->oq_ci);
3504         }
3505
3506         return num_responses;
3507 }
3508
3509 static inline unsigned int pqi_num_elements_free(unsigned int pi,
3510         unsigned int ci, unsigned int elements_in_queue)
3511 {
3512         unsigned int num_elements_used;
3513
3514         if (pi >= ci)
3515                 num_elements_used = pi - ci;
3516         else
3517                 num_elements_used = elements_in_queue - ci + pi;
3518
3519         return elements_in_queue - num_elements_used - 1;
3520 }
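
/*
 * Worked example (illustrative values): in a queue of 8 elements
 * with pi = 2 and ci = 6, the producer has wrapped, so
 * num_elements_used = 8 - 6 + 2 = 4 and 8 - 4 - 1 = 3 elements are
 * free.  One slot is always sacrificed so that pi == ci can only
 * ever mean "empty" and never "full".
 */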
3521
3522 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
3523         struct pqi_event_acknowledge_request *iu, size_t iu_length)
3524 {
3525         pqi_index_t iq_pi;
3526         pqi_index_t iq_ci;
3527         unsigned long flags;
3528         void *next_element;
3529         struct pqi_queue_group *queue_group;
3530
3531         queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
3532         put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
3533
3534         while (1) {
3535                 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
3536
3537                 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
3538                 iq_ci = readl(queue_group->iq_ci[RAID_PATH]);
3539
3540                 if (pqi_num_elements_free(iq_pi, iq_ci,
3541                         ctrl_info->num_elements_per_iq))
3542                         break;
3543
3544                 spin_unlock_irqrestore(
3545                         &queue_group->submit_lock[RAID_PATH], flags);
3546
3547                 if (pqi_ctrl_offline(ctrl_info))
3548                         return;
3549         }
3550
3551         next_element = queue_group->iq_element_array[RAID_PATH] +
3552                 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3553
3554         memcpy(next_element, iu, iu_length);
3555
3556         iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
3557         queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
3558
3559         /*
3560          * This write notifies the controller that an IU is available to be
3561          * processed.
3562          */
3563         writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
3564
3565         spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
3566 }
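
/*
 * Note on the wait loop above: the submit lock is dropped between
 * free-space checks so that in-flight completions can advance the
 * consumer index, and the wait is abandoned entirely if the
 * controller goes offline; otherwise an event ACK against a dead
 * controller would spin forever.
 */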
3567
3568 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
3569         struct pqi_event *event)
3570 {
3571         struct pqi_event_acknowledge_request request;
3572
3573         memset(&request, 0, sizeof(request));
3574
3575         request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
3576         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
3577                 &request.header.iu_length);
3578         request.event_type = event->event_type;
3579         put_unaligned_le16(event->event_id, &request.event_id);
3580         put_unaligned_le32(event->additional_event_id, &request.additional_event_id);
3581
3582         pqi_send_event_ack(ctrl_info, &request, sizeof(request));
3583 }
3584
3585 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS              30
3586 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS        1
3587
3588 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
3589         struct pqi_ctrl_info *ctrl_info)
3590 {
3591         u8 status;
3592         unsigned long timeout;
3593
3594         timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;
3595
3596         while (1) {
3597                 status = pqi_read_soft_reset_status(ctrl_info);
3598                 if (status & PQI_SOFT_RESET_INITIATE)
3599                         return RESET_INITIATE_DRIVER;
3600
3601                 if (status & PQI_SOFT_RESET_ABORT)
3602                         return RESET_ABORT;
3603
3604                 if (!sis_is_firmware_running(ctrl_info))
3605                         return RESET_NORESPONSE;
3606
3607                 if (time_after(jiffies, timeout)) {
3608                         dev_warn(&ctrl_info->pci_dev->dev,
3609                                 "timed out waiting for soft reset status\n");
3610                         return RESET_TIMEDOUT;
3611                 }
3612
3613                 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
3614         }
3615 }
3616
3617 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
3618 {
3619         int rc;
3620         unsigned int delay_secs;
3621         enum pqi_soft_reset_status reset_status;
3622
3623         if (ctrl_info->soft_reset_handshake_supported)
3624                 reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
3625         else
3626                 reset_status = RESET_INITIATE_FIRMWARE;
3627
3628         delay_secs = PQI_POST_RESET_DELAY_SECS;
3629
3630         switch (reset_status) {
3631         case RESET_TIMEDOUT:
3632                 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
3633                 fallthrough;
3634         case RESET_INITIATE_DRIVER:
3635                 dev_info(&ctrl_info->pci_dev->dev,
3636                                 "Online Firmware Activation: resetting controller\n");
3637                 sis_soft_reset(ctrl_info);
3638                 fallthrough;
3639         case RESET_INITIATE_FIRMWARE:
3640                 ctrl_info->pqi_mode_enabled = false;
3641                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
3642                 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
3643                 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3644                 pqi_ctrl_ofa_done(ctrl_info);
3645                 dev_info(&ctrl_info->pci_dev->dev,
3646                                 "Online Firmware Activation: %s\n",
3647                                 rc == 0 ? "SUCCESS" : "FAILED");
3648                 break;
3649         case RESET_ABORT:
3650                 dev_info(&ctrl_info->pci_dev->dev,
3651                                 "Online Firmware Activation ABORTED\n");
3652                 if (ctrl_info->soft_reset_handshake_supported)
3653                         pqi_clear_soft_reset_status(ctrl_info);
3654                 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3655                 pqi_ctrl_ofa_done(ctrl_info);
3656                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3657                 break;
3658         case RESET_NORESPONSE:
3659                 fallthrough;
3660         default:
3661                 dev_err(&ctrl_info->pci_dev->dev,
3662                         "unexpected Online Firmware Activation reset status: 0x%x\n",
3663                         reset_status);
3664                 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3665                 pqi_ctrl_ofa_done(ctrl_info);
3666                 pqi_ofa_ctrl_unquiesce(ctrl_info);
3667                 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
3668                 break;
3669         }
3670 }
3671
3672 static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
3673 {
3674         struct pqi_ctrl_info *ctrl_info;
3675
3676         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);
3677
3678         pqi_ctrl_ofa_start(ctrl_info);
3679         pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
3680         pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
3681 }
3682
3683 static void pqi_ofa_quiesce_worker(struct work_struct *work)
3684 {
3685         struct pqi_ctrl_info *ctrl_info;
3686         struct pqi_event *event;
3687
3688         ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);
3689
3690         event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];
3691
3692         pqi_ofa_ctrl_quiesce(ctrl_info);
3693         pqi_acknowledge_event(ctrl_info, event);
3694         pqi_process_soft_reset(ctrl_info);
3695 }
3696
3697 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
3698         struct pqi_event *event)
3699 {
3700         bool ack_event;
3701
3702         ack_event = true;
3703
3704         switch (event->event_id) {
3705         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3706                 dev_info(&ctrl_info->pci_dev->dev,
3707                         "received Online Firmware Activation memory allocation request\n");
3708                 schedule_work(&ctrl_info->ofa_memory_alloc_work);
3709                 break;
3710         case PQI_EVENT_OFA_QUIESCE:
3711                 dev_info(&ctrl_info->pci_dev->dev,
3712                         "received Online Firmware Activation quiesce request\n");
3713                 schedule_work(&ctrl_info->ofa_quiesce_work);
3714                 ack_event = false;
3715                 break;
3716         case PQI_EVENT_OFA_CANCELED:
3717                 dev_info(&ctrl_info->pci_dev->dev,
3718                         "received Online Firmware Activation cancel request: reason: %u\n",
3719                         ctrl_info->ofa_cancel_reason);
3720                 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
3721                 pqi_ctrl_ofa_done(ctrl_info);
3722                 break;
3723         default:
3724                 dev_err(&ctrl_info->pci_dev->dev,
3725                         "received unknown Online Firmware Activation request: event ID: %u\n",
3726                         event->event_id);
3727                 break;
3728         }
3729
3730         return ack_event;
3731 }
3732
3733 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
3734 {
3735         unsigned long flags;
3736         struct pqi_scsi_dev *device;
3737
3738         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3739
3740         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
3741                 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
3742                         device->rescan = true;
3743         }
3744
3745         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3746 }
3747
3748 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
3749 {
3750         unsigned long flags;
3751         struct pqi_scsi_dev *device;
3752
3753         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
3754
3755         list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
3756                 if (device->raid_bypass_enabled)
3757                         device->raid_bypass_enabled = false;
3758
3759         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
3760 }
3761
3762 static void pqi_event_worker(struct work_struct *work)
3763 {
3764         unsigned int i;
3765         bool rescan_needed;
3766         struct pqi_ctrl_info *ctrl_info;
3767         struct pqi_event *event;
3768         bool ack_event;
3769
3770         ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
3771
3772         pqi_ctrl_busy(ctrl_info);
3773         pqi_wait_if_ctrl_blocked(ctrl_info);
3774         if (pqi_ctrl_offline(ctrl_info))
3775                 goto out;
3776
3777         rescan_needed = false;
3778         event = ctrl_info->events;
3779         for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
3780                 if (event->pending) {
3781                         event->pending = false;
3782                         if (event->event_type == PQI_EVENT_TYPE_OFA) {
3783                                 ack_event = pqi_ofa_process_event(ctrl_info, event);
3784                         } else {
3785                                 ack_event = true;
3786                                 rescan_needed = true;
3787                                 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
3788                                         pqi_mark_volumes_for_rescan(ctrl_info);
3789                                 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
3790                                         pqi_disable_raid_bypass(ctrl_info);
3791                         }
3792                         if (ack_event)
3793                                 pqi_acknowledge_event(ctrl_info, event);
3794                 }
3795                 event++;
3796         }
3797
3798 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY         (5 * HZ)
3799
3800         if (rescan_needed)
3801                 pqi_schedule_rescan_worker_with_delay(ctrl_info,
3802                         PQI_RESCAN_WORK_FOR_EVENT_DELAY);
3803
3804 out:
3805         pqi_ctrl_unbusy(ctrl_info);
3806 }
3807
3808 #define PQI_HEARTBEAT_TIMER_INTERVAL    (10 * HZ)
3809
3810 static void pqi_heartbeat_timer_handler(struct timer_list *t)
3811 {
3812         int num_interrupts;
3813         u32 heartbeat_count;
3814         struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);
3815
3816         pqi_check_ctrl_health(ctrl_info);
3817         if (pqi_ctrl_offline(ctrl_info))
3818                 return;
3819
3820         num_interrupts = atomic_read(&ctrl_info->num_interrupts);
3821         heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
3822
3823         if (num_interrupts == ctrl_info->previous_num_interrupts) {
3824                 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
3825                         dev_err(&ctrl_info->pci_dev->dev,
3826                                 "no heartbeat detected - last heartbeat count: %u\n",
3827                                 heartbeat_count);
3828                         pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
3829                         return;
3830                 }
3831         } else {
3832                 ctrl_info->previous_num_interrupts = num_interrupts;
3833         }
3834
3835         ctrl_info->previous_heartbeat_count = heartbeat_count;
3836         mod_timer(&ctrl_info->heartbeat_timer,
3837                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
3838 }
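
/*
 * The handler above only declares the controller dead when both
 * signals are flat across one 10-second interval: no new interrupts
 * serviced by the driver and no movement in the firmware heartbeat
 * counter.  Either one advancing simply re-arms the timer for
 * another interval.
 */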
3839
3840 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3841 {
3842         if (!ctrl_info->heartbeat_counter)
3843                 return;
3844
3845         ctrl_info->previous_num_interrupts =
3846                 atomic_read(&ctrl_info->num_interrupts);
3847         ctrl_info->previous_heartbeat_count =
3848                 pqi_read_heartbeat_counter(ctrl_info);
3849
3850         ctrl_info->heartbeat_timer.expires =
3851                 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
3852         add_timer(&ctrl_info->heartbeat_timer);
3853 }
3854
3855 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
3856 {
3857         del_timer_sync(&ctrl_info->heartbeat_timer);
3858 }
3859
3860 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
3861         struct pqi_event *event, struct pqi_event_response *response)
3862 {
3863         switch (event->event_id) {
3864         case PQI_EVENT_OFA_MEMORY_ALLOCATION:
3865                 ctrl_info->ofa_bytes_requested =
3866                         get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
3867                 break;
3868         case PQI_EVENT_OFA_CANCELED:
3869                 ctrl_info->ofa_cancel_reason =
3870                         get_unaligned_le16(&response->data.ofa_cancelled.reason);
3871                 break;
3872         }
3873 }
3874
3875 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
3876 {
3877         int num_events;
3878         pqi_index_t oq_pi;
3879         pqi_index_t oq_ci;
3880         struct pqi_event_queue *event_queue;
3881         struct pqi_event_response *response;
3882         struct pqi_event *event;
3883         int event_index;
3884
3885         event_queue = &ctrl_info->event_queue;
3886         num_events = 0;
3887         oq_ci = event_queue->oq_ci_copy;
3888
3889         while (1) {
3890                 oq_pi = readl(event_queue->oq_pi);
3891                 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
3892                         pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
3893                         dev_err(&ctrl_info->pci_dev->dev,
3894                                 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
3895                                 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
3896                         return -1;
3897                 }
3898
3899                 if (oq_pi == oq_ci)
3900                         break;
3901
3902                 num_events++;
3903                 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
3904
3905                 event_index = pqi_event_type_to_event_index(response->event_type);
3906
3907                 if (event_index >= 0 && response->request_acknowledge) {
3908                         event = &ctrl_info->events[event_index];
3909                         event->pending = true;
3910                         event->event_type = response->event_type;
3911                         event->event_id = get_unaligned_le16(&response->event_id);
3912                         event->additional_event_id =
3913                                 get_unaligned_le32(&response->additional_event_id);
3914                         if (event->event_type == PQI_EVENT_TYPE_OFA)
3915                                 pqi_ofa_capture_event_payload(ctrl_info, event, response);
3916                 }
3917
3918                 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
3919         }
3920
3921         if (num_events) {
3922                 event_queue->oq_ci_copy = oq_ci;
3923                 writel(oq_ci, event_queue->oq_ci);
3924                 schedule_work(&ctrl_info->event_work);
3925         }
3926
3927         return num_events;
3928 }
3929
3930 #define PQI_LEGACY_INTX_MASK    0x1
3931
3932 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
3933 {
3934         u32 intx_mask;
3935         struct pqi_device_registers __iomem *pqi_registers;
3936         volatile void __iomem *register_addr;
3937
3938         pqi_registers = ctrl_info->pqi_registers;
3939
3940         if (enable_intx)
3941                 register_addr = &pqi_registers->legacy_intx_mask_clear;
3942         else
3943                 register_addr = &pqi_registers->legacy_intx_mask_set;
3944
3945         intx_mask = readl(register_addr);
3946         intx_mask |= PQI_LEGACY_INTX_MASK;
3947         writel(intx_mask, register_addr);
3948 }
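
/*
 * The legacy INTx mask appears to be managed through a set/clear
 * register pair in the usual write-one-to-set / write-one-to-clear
 * convention: writing PQI_LEGACY_INTX_MASK to legacy_intx_mask_clear
 * unmasks the interrupt, writing it to legacy_intx_mask_set masks
 * it, and bits written as 0 are presumably left untouched.
 */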
3949
3950 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
3951         enum pqi_irq_mode new_mode)
3952 {
3953         switch (ctrl_info->irq_mode) {
3954         case IRQ_MODE_MSIX:
3955                 switch (new_mode) {
3956                 case IRQ_MODE_MSIX:
3957                         break;
3958                 case IRQ_MODE_INTX:
3959                         pqi_configure_legacy_intx(ctrl_info, true);
3960                         sis_enable_intx(ctrl_info);
3961                         break;
3962                 case IRQ_MODE_NONE:
3963                         break;
3964                 }
3965                 break;
3966         case IRQ_MODE_INTX:
3967                 switch (new_mode) {
3968                 case IRQ_MODE_MSIX:
3969                         pqi_configure_legacy_intx(ctrl_info, false);
3970                         sis_enable_msix(ctrl_info);
3971                         break;
3972                 case IRQ_MODE_INTX:
3973                         break;
3974                 case IRQ_MODE_NONE:
3975                         pqi_configure_legacy_intx(ctrl_info, false);
3976                         break;
3977                 }
3978                 break;
3979         case IRQ_MODE_NONE:
3980                 switch (new_mode) {
3981                 case IRQ_MODE_MSIX:
3982                         sis_enable_msix(ctrl_info);
3983                         break;
3984                 case IRQ_MODE_INTX:
3985                         pqi_configure_legacy_intx(ctrl_info, true);
3986                         sis_enable_intx(ctrl_info);
3987                         break;
3988                 case IRQ_MODE_NONE:
3989                         break;
3990                 }
3991                 break;
3992         }
3993
3994         ctrl_info->irq_mode = new_mode;
3995 }
3996
3997 #define PQI_LEGACY_INTX_PENDING         0x1
3998
3999 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
4000 {
4001         bool valid_irq;
4002         u32 intx_status;
4003
4004         switch (ctrl_info->irq_mode) {
4005         case IRQ_MODE_MSIX:
4006                 valid_irq = true;
4007                 break;
4008         case IRQ_MODE_INTX:
4009                 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
4010                 if (intx_status & PQI_LEGACY_INTX_PENDING)
4011                         valid_irq = true;
4012                 else
4013                         valid_irq = false;
4014                 break;
4015         case IRQ_MODE_NONE:
4016         default:
4017                 valid_irq = false;
4018                 break;
4019         }
4020
4021         return valid_irq;
4022 }
4023
4024 static irqreturn_t pqi_irq_handler(int irq, void *data)
4025 {
4026         struct pqi_ctrl_info *ctrl_info;
4027         struct pqi_queue_group *queue_group;
4028         int num_io_responses_handled;
4029         int num_events_handled;
4030
4031         queue_group = data;
4032         ctrl_info = queue_group->ctrl_info;
4033
4034         if (!pqi_is_valid_irq(ctrl_info))
4035                 return IRQ_NONE;
4036
4037         num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
4038         if (num_io_responses_handled < 0)
4039                 goto out;
4040
4041         if (irq == ctrl_info->event_irq) {
4042                 num_events_handled = pqi_process_event_intr(ctrl_info);
4043                 if (num_events_handled < 0)
4044                         goto out;
4045         } else {
4046                 num_events_handled = 0;
4047         }
4048
4049         if (num_io_responses_handled + num_events_handled > 0)
4050                 atomic_inc(&ctrl_info->num_interrupts);
4051
4052         pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
4053         pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
4054
4055 out:
4056         return IRQ_HANDLED;
4057 }
4058
4059 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
4060 {
4061         struct pci_dev *pci_dev = ctrl_info->pci_dev;
4062         int i;
4063         int rc;
4064
4065         ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
4066
4067         for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
4068                 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
4069                         DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
4070                 if (rc) {
4071                         dev_err(&pci_dev->dev,
4072                                 "irq %u init failed with error %d\n",
4073                                 pci_irq_vector(pci_dev, i), rc);
4074                         return rc;
4075                 }
4076                 ctrl_info->num_msix_vectors_initialized++;
4077         }
4078
4079         return 0;
4080 }
4081
4082 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
4083 {
4084         int i;
4085
4086         for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
4087                 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
4088                         &ctrl_info->queue_groups[i]);
4089
4090         ctrl_info->num_msix_vectors_initialized = 0;
4091 }
4092
4093 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4094 {
4095         int num_vectors_enabled;
4096         unsigned int flags = PCI_IRQ_MSIX;
4097
4098         if (!pqi_disable_managed_interrupts)
4099                 flags |= PCI_IRQ_AFFINITY;
4100
4101         num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
4102                         PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
4103                         flags);
4104         if (num_vectors_enabled < 0) {
4105                 dev_err(&ctrl_info->pci_dev->dev,
4106                         "MSI-X init failed with error %d\n",
4107                         num_vectors_enabled);
4108                 return num_vectors_enabled;
4109         }
4110
4111         ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
4112         ctrl_info->irq_mode = IRQ_MODE_MSIX;
4113         return 0;
4114 }
4115
4116 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
4117 {
4118         if (ctrl_info->num_msix_vectors_enabled) {
4119                 pci_free_irq_vectors(ctrl_info->pci_dev);
4120                 ctrl_info->num_msix_vectors_enabled = 0;
4121         }
4122 }
4123
4124 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4125 {
4126         unsigned int i;
4127         size_t alloc_length;
4128         size_t element_array_length_per_iq;
4129         size_t element_array_length_per_oq;
4130         void *element_array;
4131         void __iomem *next_queue_index;
4132         void *aligned_pointer;
4133         unsigned int num_inbound_queues;
4134         unsigned int num_outbound_queues;
4135         unsigned int num_queue_indexes;
4136         struct pqi_queue_group *queue_group;
4137
4138         element_array_length_per_iq =
4139                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4140                 ctrl_info->num_elements_per_iq;
4141         element_array_length_per_oq =
4142                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4143                 ctrl_info->num_elements_per_oq;
4144         num_inbound_queues = ctrl_info->num_queue_groups * 2;
4145         num_outbound_queues = ctrl_info->num_queue_groups;
4146         num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4147
4148         aligned_pointer = NULL;
4149
4150         for (i = 0; i < num_inbound_queues; i++) {
4151                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4152                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4153                 aligned_pointer += element_array_length_per_iq;
4154         }
4155
4156         for (i = 0; i < num_outbound_queues; i++) {
4157                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4158                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4159                 aligned_pointer += element_array_length_per_oq;
4160         }
4161
4162         aligned_pointer = PTR_ALIGN(aligned_pointer,
4163                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4164         aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4165                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4166
4167         for (i = 0; i < num_queue_indexes; i++) {
4168                 aligned_pointer = PTR_ALIGN(aligned_pointer,
4169                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4170                 aligned_pointer += sizeof(pqi_index_t);
4171         }
4172
4173         alloc_length = (size_t)aligned_pointer +
4174                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4175
4176         alloc_length += PQI_EXTRA_SGL_MEMORY;
4177
4178         ctrl_info->queue_memory_base =
4179                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4180                                    &ctrl_info->queue_memory_base_dma_handle,
4181                                    GFP_KERNEL);
4182
4183         if (!ctrl_info->queue_memory_base)
4184                 return -ENOMEM;
4185
4186         ctrl_info->queue_memory_length = alloc_length;
4187
4188         element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4189                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4190
4191         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4192                 queue_group = &ctrl_info->queue_groups[i];
4193                 queue_group->iq_element_array[RAID_PATH] = element_array;
4194                 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4195                         ctrl_info->queue_memory_base_dma_handle +
4196                                 (element_array - ctrl_info->queue_memory_base);
4197                 element_array += element_array_length_per_iq;
4198                 element_array = PTR_ALIGN(element_array,
4199                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4200                 queue_group->iq_element_array[AIO_PATH] = element_array;
4201                 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4202                         ctrl_info->queue_memory_base_dma_handle +
4203                         (element_array - ctrl_info->queue_memory_base);
4204                 element_array += element_array_length_per_iq;
4205                 element_array = PTR_ALIGN(element_array,
4206                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4207         }
4208
4209         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4210                 queue_group = &ctrl_info->queue_groups[i];
4211                 queue_group->oq_element_array = element_array;
4212                 queue_group->oq_element_array_bus_addr =
4213                         ctrl_info->queue_memory_base_dma_handle +
4214                         (element_array - ctrl_info->queue_memory_base);
4215                 element_array += element_array_length_per_oq;
4216                 element_array = PTR_ALIGN(element_array,
4217                         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4218         }
4219
4220         ctrl_info->event_queue.oq_element_array = element_array;
4221         ctrl_info->event_queue.oq_element_array_bus_addr =
4222                 ctrl_info->queue_memory_base_dma_handle +
4223                 (element_array - ctrl_info->queue_memory_base);
4224         element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4225                 PQI_EVENT_OQ_ELEMENT_LENGTH;
4226
4227         next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4228                 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4229
4230         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4231                 queue_group = &ctrl_info->queue_groups[i];
4232                 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4233                 queue_group->iq_ci_bus_addr[RAID_PATH] =
4234                         ctrl_info->queue_memory_base_dma_handle +
4235                         (next_queue_index -
4236                         (void __iomem *)ctrl_info->queue_memory_base);
4237                 next_queue_index += sizeof(pqi_index_t);
4238                 next_queue_index = PTR_ALIGN(next_queue_index,
4239                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4240                 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4241                 queue_group->iq_ci_bus_addr[AIO_PATH] =
4242                         ctrl_info->queue_memory_base_dma_handle +
4243                         (next_queue_index -
4244                         (void __iomem *)ctrl_info->queue_memory_base);
4245                 next_queue_index += sizeof(pqi_index_t);
4246                 next_queue_index = PTR_ALIGN(next_queue_index,
4247                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4248                 queue_group->oq_pi = next_queue_index;
4249                 queue_group->oq_pi_bus_addr =
4250                         ctrl_info->queue_memory_base_dma_handle +
4251                         (next_queue_index -
4252                         (void __iomem *)ctrl_info->queue_memory_base);
4253                 next_queue_index += sizeof(pqi_index_t);
4254                 next_queue_index = PTR_ALIGN(next_queue_index,
4255                         PQI_OPERATIONAL_INDEX_ALIGNMENT);
4256         }
4257
4258         ctrl_info->event_queue.oq_pi = next_queue_index;
4259         ctrl_info->event_queue.oq_pi_bus_addr =
4260                 ctrl_info->queue_memory_base_dma_handle +
4261                 (next_queue_index -
4262                 (void __iomem *)ctrl_info->queue_memory_base);
4263
4264         return 0;
4265 }
4266
4267 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4268 {
4269         unsigned int i;
4270         u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4271         u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4272
4273         /*
4274          * Initialize the backpointers to the controller structure in
4275          * each operational queue group structure.
4276          */
4277         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4278                 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4279
4280         /*
4281          * Assign IDs to all operational queues.  Note that the IDs
4282          * assigned to operational IQs are independent of the IDs
4283          * assigned to operational OQs.
4284          */
4285         ctrl_info->event_queue.oq_id = next_oq_id++;
4286         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4287                 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4288                 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4289                 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4290         }
4291
4292         /*
4293          * Assign MSI-X table entry indexes to all queues.  Note that the
4294          * interrupt for the event queue is shared with the first queue group.
4295          */
4296         ctrl_info->event_queue.int_msg_num = 0;
4297         for (i = 0; i < ctrl_info->num_queue_groups; i++)
4298                 ctrl_info->queue_groups[i].int_msg_num = i;
4299
4300         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4301                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4302                 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4303                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4304                 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4305         }
4306 }
4307
4308 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4309 {
4310         size_t alloc_length;
4311         struct pqi_admin_queues_aligned *admin_queues_aligned;
4312         struct pqi_admin_queues *admin_queues;
4313
4314         alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4315                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4316
4317         ctrl_info->admin_queue_memory_base =
4318                 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4319                                    &ctrl_info->admin_queue_memory_base_dma_handle,
4320                                    GFP_KERNEL);
4321
4322         if (!ctrl_info->admin_queue_memory_base)
4323                 return -ENOMEM;
4324
4325         ctrl_info->admin_queue_memory_length = alloc_length;
4326
4327         admin_queues = &ctrl_info->admin_queues;
4328         admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4329                 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4330         admin_queues->iq_element_array =
4331                 &admin_queues_aligned->iq_element_array;
4332         admin_queues->oq_element_array =
4333                 &admin_queues_aligned->oq_element_array;
4334         admin_queues->iq_ci =
4335                 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4336         admin_queues->oq_pi =
4337                 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4338
4339         admin_queues->iq_element_array_bus_addr =
4340                 ctrl_info->admin_queue_memory_base_dma_handle +
4341                 (admin_queues->iq_element_array -
4342                 ctrl_info->admin_queue_memory_base);
4343         admin_queues->oq_element_array_bus_addr =
4344                 ctrl_info->admin_queue_memory_base_dma_handle +
4345                 (admin_queues->oq_element_array -
4346                 ctrl_info->admin_queue_memory_base);
4347         admin_queues->iq_ci_bus_addr =
4348                 ctrl_info->admin_queue_memory_base_dma_handle +
4349                 ((void __iomem *)admin_queues->iq_ci -
4350                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4351         admin_queues->oq_pi_bus_addr =
4352                 ctrl_info->admin_queue_memory_base_dma_handle +
4353                 ((void __iomem *)admin_queues->oq_pi -
4354                 (void __iomem *)ctrl_info->admin_queue_memory_base);
4355
4356         return 0;
4357 }
4358
4359 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES          HZ
4360 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS      1
4361
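/*
 * Program the admin queue pair's element array and index addresses into the
 * PQI device registers, issue the "create admin queue pair" function, then
 * poll function_and_status_code until the controller reports idle (bounded
 * by the roughly one-second timeout above).
 */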
4362 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4363 {
4364         struct pqi_device_registers __iomem *pqi_registers;
4365         struct pqi_admin_queues *admin_queues;
4366         unsigned long timeout;
4367         u8 status;
4368         u32 reg;
4369
4370         pqi_registers = ctrl_info->pqi_registers;
4371         admin_queues = &ctrl_info->admin_queues;
4372
4373         writeq((u64)admin_queues->iq_element_array_bus_addr,
4374                 &pqi_registers->admin_iq_element_array_addr);
4375         writeq((u64)admin_queues->oq_element_array_bus_addr,
4376                 &pqi_registers->admin_oq_element_array_addr);
4377         writeq((u64)admin_queues->iq_ci_bus_addr,
4378                 &pqi_registers->admin_iq_ci_addr);
4379         writeq((u64)admin_queues->oq_pi_bus_addr,
4380                 &pqi_registers->admin_oq_pi_addr);
4381
4382         reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4383                 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4384                 (admin_queues->int_msg_num << 16);
4385         writel(reg, &pqi_registers->admin_iq_num_elements);
4386
4387         writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4388                 &pqi_registers->function_and_status_code);
4389
4390         timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4391         while (1) {
4392                 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4393                 status = readb(&pqi_registers->function_and_status_code);
4394                 if (status == PQI_STATUS_IDLE)
4395                         break;
4396                 if (time_after(jiffies, timeout))
4397                         return -ETIMEDOUT;
4398         }
4399
4400         /*
4401          * The offset registers are not initialized to the correct
4402          * offsets until *after* the create admin queue pair command
4403          * completes successfully.
4404          */
4405         admin_queues->iq_pi = ctrl_info->iomem_base +
4406                 PQI_DEVICE_REGISTERS_OFFSET +
4407                 readq(&pqi_registers->admin_iq_pi_offset);
4408         admin_queues->oq_ci = ctrl_info->iomem_base +
4409                 PQI_DEVICE_REGISTERS_OFFSET +
4410                 readq(&pqi_registers->admin_oq_ci_offset);
4411
4412         return 0;
4413 }
4414
4415 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4416         struct pqi_general_admin_request *request)
4417 {
4418         struct pqi_admin_queues *admin_queues;
4419         void *next_element;
4420         pqi_index_t iq_pi;
4421
4422         admin_queues = &ctrl_info->admin_queues;
4423         iq_pi = admin_queues->iq_pi_copy;
4424
4425         next_element = admin_queues->iq_element_array +
4426                 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4427
4428         memcpy(next_element, request, sizeof(*request));
4429
4430         iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4431         admin_queues->iq_pi_copy = iq_pi;
4432
4433         /*
4434          * This write notifies the controller that an IU is available to be
4435          * processed.
4436          */
4437         writel(iq_pi, admin_queues->iq_pi);
4438 }
4439
4440 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS  60
4441
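/*
 * Poll the admin OQ for a response: a new completion is available whenever
 * the controller-written producer index differs from the driver's consumer
 * index copy.  Bail out early if firmware stops running.
 */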
4442 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4443         struct pqi_general_admin_response *response)
4444 {
4445         struct pqi_admin_queues *admin_queues;
4446         pqi_index_t oq_pi;
4447         pqi_index_t oq_ci;
4448         unsigned long timeout;
4449
4450         admin_queues = &ctrl_info->admin_queues;
4451         oq_ci = admin_queues->oq_ci_copy;
4452
4453         timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4454
4455         while (1) {
4456                 oq_pi = readl(admin_queues->oq_pi);
4457                 if (oq_pi != oq_ci)
4458                         break;
4459                 if (time_after(jiffies, timeout)) {
4460                         dev_err(&ctrl_info->pci_dev->dev,
4461                                 "timed out waiting for admin response\n");
4462                         return -ETIMEDOUT;
4463                 }
4464                 if (!sis_is_firmware_running(ctrl_info))
4465                         return -ENXIO;
4466                 usleep_range(1000, 2000);
4467         }
4468
4469         memcpy(response, admin_queues->oq_element_array +
4470                 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4471
4472         oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4473         admin_queues->oq_ci_copy = oq_ci;
4474         writel(oq_ci, admin_queues->oq_ci);
4475
4476         return 0;
4477 }
4478
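/*
 * Copy queued IUs into the inbound queue's element array and ring the
 * producer-index doorbell once for the whole batch.  An IU larger than one
 * element spans consecutive elements; if it would run past the end of the
 * element array, the copy is split and wraps to element 0.  Hypothetical
 * example: with a 32-element queue, iq_pi = 30, and an IU needing 4
 * elements, two elements' worth lands in slots 30-31, the rest wraps to
 * slots 0-1, and iq_pi becomes (30 + 4) % 32 = 2.  Requests that do not
 * fit remain on the request list until space frees up.
 */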
4479 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4480         struct pqi_queue_group *queue_group, enum pqi_io_path path,
4481         struct pqi_io_request *io_request)
4482 {
4483         struct pqi_io_request *next;
4484         void *next_element;
4485         pqi_index_t iq_pi;
4486         pqi_index_t iq_ci;
4487         size_t iu_length;
4488         unsigned long flags;
4489         unsigned int num_elements_needed;
4490         unsigned int num_elements_to_end_of_queue;
4491         size_t copy_count;
4492         struct pqi_iu_header *request;
4493
4494         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4495
4496         if (io_request) {
4497                 io_request->queue_group = queue_group;
4498                 list_add_tail(&io_request->request_list_entry,
4499                         &queue_group->request_list[path]);
4500         }
4501
4502         iq_pi = queue_group->iq_pi_copy[path];
4503
4504         list_for_each_entry_safe(io_request, next,
4505                 &queue_group->request_list[path], request_list_entry) {
4506
4507                 request = io_request->iu;
4508
4509                 iu_length = get_unaligned_le16(&request->iu_length) +
4510                         PQI_REQUEST_HEADER_LENGTH;
4511                 num_elements_needed =
4512                         DIV_ROUND_UP(iu_length,
4513                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4514
4515                 iq_ci = readl(queue_group->iq_ci[path]);
4516
4517                 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4518                         ctrl_info->num_elements_per_iq))
4519                         break;
4520
4521                 put_unaligned_le16(queue_group->oq_id,
4522                         &request->response_queue_id);
4523
4524                 next_element = queue_group->iq_element_array[path] +
4525                         (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4526
4527                 num_elements_to_end_of_queue =
4528                         ctrl_info->num_elements_per_iq - iq_pi;
4529
4530                 if (num_elements_needed <= num_elements_to_end_of_queue) {
4531                         memcpy(next_element, request, iu_length);
4532                 } else {
4533                         copy_count = num_elements_to_end_of_queue *
4534                                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4535                         memcpy(next_element, request, copy_count);
4536                         memcpy(queue_group->iq_element_array[path],
4537                                 (u8 *)request + copy_count,
4538                                 iu_length - copy_count);
4539                 }
4540
4541                 iq_pi = (iq_pi + num_elements_needed) %
4542                         ctrl_info->num_elements_per_iq;
4543
4544                 list_del(&io_request->request_list_entry);
4545         }
4546
4547         if (iq_pi != queue_group->iq_pi_copy[path]) {
4548                 queue_group->iq_pi_copy[path] = iq_pi;
4549                 /*
4550                  * This write notifies the controller that one or more IUs are
4551                  * available to be processed.
4552                  */
4553                 writel(iq_pi, queue_group->iq_pi[path]);
4554         }
4555
4556         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4557 }
4558
4559 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS         10
4560
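/*
 * Wait for a request to complete, re-arming the 10-second timeout
 * indefinitely; the only early exit is the controller going offline.
 */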
4561 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4562         struct completion *wait)
4563 {
4564         int rc;
4565
4566         while (1) {
4567                 if (wait_for_completion_io_timeout(wait,
4568                         PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4569                         rc = 0;
4570                         break;
4571                 }
4572
4573                 pqi_check_ctrl_health(ctrl_info);
4574                 if (pqi_ctrl_offline(ctrl_info)) {
4575                         rc = -ENXIO;
4576                         break;
4577                 }
4578         }
4579
4580         return rc;
4581 }
4582
4583 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4584         void *context)
4585 {
4586         struct completion *waiting = context;
4587
4588         complete(waiting);
4589 }
4590
4591 static int pqi_process_raid_io_error_synchronous(
4592         struct pqi_raid_error_info *error_info)
4593 {
4594         int rc = -EIO;
4595
4596         switch (error_info->data_out_result) {
4597         case PQI_DATA_IN_OUT_GOOD:
4598                 if (error_info->status == SAM_STAT_GOOD)
4599                         rc = 0;
4600                 break;
4601         case PQI_DATA_IN_OUT_UNDERFLOW:
4602                 if (error_info->status == SAM_STAT_GOOD ||
4603                         error_info->status == SAM_STAT_CHECK_CONDITION)
4604                         rc = 0;
4605                 break;
4606         case PQI_DATA_IN_OUT_ABORTED:
4607                 rc = PQI_CMD_STATUS_ABORTED;
4608                 break;
4609         }
4610
4611         return rc;
4612 }
4613
4614 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4615 {
4616         return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4617 }
4618
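/*
 * Submit a RAID-path request and sleep until it completes.  The
 * sync_request_sem semaphore throttles concurrent synchronous requests
 * (acquired interruptibly when PQI_SYNC_FLAGS_INTERRUPTABLE is set), and
 * pqi_raid_synchronous_complete() signals the on-stack completion.
 */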
4619 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4620         struct pqi_iu_header *request, unsigned int flags,
4621         struct pqi_raid_error_info *error_info)
4622 {
4623         int rc = 0;
4624         struct pqi_io_request *io_request;
4625         size_t iu_length;
4626         DECLARE_COMPLETION_ONSTACK(wait);
4627
4628         if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4629                 if (down_interruptible(&ctrl_info->sync_request_sem))
4630                         return -ERESTARTSYS;
4631         } else {
4632                 down(&ctrl_info->sync_request_sem);
4633         }
4634
4635         pqi_ctrl_busy(ctrl_info);
4636         /*
4637          * Wait for other admin queue updates, such as
4638          * config table changes, OFA memory updates, etc.
4639          */
4640         if (pqi_is_blockable_request(request))
4641                 pqi_wait_if_ctrl_blocked(ctrl_info);
4642
4643         if (pqi_ctrl_offline(ctrl_info)) {
4644                 rc = -ENXIO;
4645                 goto out;
4646         }
4647
4648         io_request = pqi_alloc_io_request(ctrl_info, NULL);
4649
4650         put_unaligned_le16(io_request->index,
4651                 &(((struct pqi_raid_path_request *)request)->request_id));
4652
4653         if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4654                 ((struct pqi_raid_path_request *)request)->error_index =
4655                         ((struct pqi_raid_path_request *)request)->request_id;
4656
4657         iu_length = get_unaligned_le16(&request->iu_length) +
4658                 PQI_REQUEST_HEADER_LENGTH;
4659         memcpy(io_request->iu, request, iu_length);
4660
4661         io_request->io_complete_callback = pqi_raid_synchronous_complete;
4662         io_request->context = &wait;
4663
4664         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4665                 io_request);
4666
4667         pqi_wait_for_completion_io(ctrl_info, &wait);
4668
4669         if (error_info) {
4670                 if (io_request->error_info)
4671                         memcpy(error_info, io_request->error_info, sizeof(*error_info));
4672                 else
4673                         memset(error_info, 0, sizeof(*error_info));
4674         } else if (rc == 0 && io_request->error_info) {
4675                 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4676         }
4677
4678         pqi_free_io_request(io_request);
4679
4680 out:
4681         pqi_ctrl_unbusy(ctrl_info);
4682         up(&ctrl_info->sync_request_sem);
4683
4684         return rc;
4685 }
4686
4687 static int pqi_validate_admin_response(
4688         struct pqi_general_admin_response *response, u8 expected_function_code)
4689 {
4690         if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4691                 return -EINVAL;
4692
4693         if (get_unaligned_le16(&response->header.iu_length) !=
4694                 PQI_GENERAL_ADMIN_IU_LENGTH)
4695                 return -EINVAL;
4696
4697         if (response->function_code != expected_function_code)
4698                 return -EINVAL;
4699
4700         if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4701                 return -EINVAL;
4702
4703         return 0;
4704 }
4705
4706 static int pqi_submit_admin_request_synchronous(
4707         struct pqi_ctrl_info *ctrl_info,
4708         struct pqi_general_admin_request *request,
4709         struct pqi_general_admin_response *response)
4710 {
4711         int rc;
4712
4713         pqi_submit_admin_request(ctrl_info, request);
4714
4715         rc = pqi_poll_for_admin_response(ctrl_info, response);
4716
4717         if (rc == 0)
4718                 rc = pqi_validate_admin_response(response, request->function_code);
4719
4720         return rc;
4721 }
4722
4723 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4724 {
4725         int rc;
4726         struct pqi_general_admin_request request;
4727         struct pqi_general_admin_response response;
4728         struct pqi_device_capability *capability;
4729         struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4730
4731         capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4732         if (!capability)
4733                 return -ENOMEM;
4734
4735         memset(&request, 0, sizeof(request));
4736
4737         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4738         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4739                 &request.header.iu_length);
4740         request.function_code =
4741                 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4742         put_unaligned_le32(sizeof(*capability),
4743                 &request.data.report_device_capability.buffer_length);
4744
4745         rc = pqi_map_single(ctrl_info->pci_dev,
4746                 &request.data.report_device_capability.sg_descriptor,
4747                 capability, sizeof(*capability),
4748                 DMA_FROM_DEVICE);
4749         if (rc)
4750                 goto out;
4751
4752         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4753
4754         pqi_pci_unmap(ctrl_info->pci_dev,
4755                 &request.data.report_device_capability.sg_descriptor, 1,
4756                 DMA_FROM_DEVICE);
4757
4758         if (rc)
4759                 goto out;
4760
4761         if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4762                 rc = -EIO;
4763                 goto out;
4764         }
4765
4766         ctrl_info->max_inbound_queues =
4767                 get_unaligned_le16(&capability->max_inbound_queues);
4768         ctrl_info->max_elements_per_iq =
4769                 get_unaligned_le16(&capability->max_elements_per_iq);
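        /* PQI expresses queue element lengths in 16-byte units. */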
4770         ctrl_info->max_iq_element_length =
4771                 get_unaligned_le16(&capability->max_iq_element_length)
4772                 * 16;
4773         ctrl_info->max_outbound_queues =
4774                 get_unaligned_le16(&capability->max_outbound_queues);
4775         ctrl_info->max_elements_per_oq =
4776                 get_unaligned_le16(&capability->max_elements_per_oq);
4777         ctrl_info->max_oq_element_length =
4778                 get_unaligned_le16(&capability->max_oq_element_length)
4779                 * 16;
4780
4781         sop_iu_layer_descriptor =
4782                 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4783
4784         ctrl_info->max_inbound_iu_length_per_firmware =
4785                 get_unaligned_le16(
4786                         &sop_iu_layer_descriptor->max_inbound_iu_length);
4787         ctrl_info->inbound_spanning_supported =
4788                 sop_iu_layer_descriptor->inbound_spanning_supported;
4789         ctrl_info->outbound_spanning_supported =
4790                 sop_iu_layer_descriptor->outbound_spanning_supported;
4791
4792 out:
4793         kfree(capability);
4794
4795         return rc;
4796 }
4797
4798 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4799 {
4800         if (ctrl_info->max_iq_element_length <
4801                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4802                 dev_err(&ctrl_info->pci_dev->dev,
4803                         "max. inbound queue element length of %d is less than the required length of %d\n",
4804                         ctrl_info->max_iq_element_length,
4805                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4806                 return -EINVAL;
4807         }
4808
4809         if (ctrl_info->max_oq_element_length <
4810                 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4811                 dev_err(&ctrl_info->pci_dev->dev,
4812                         "max. outbound queue element length of %d is less than the required length of %d\n",
4813                         ctrl_info->max_oq_element_length,
4814                         PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4815                 return -EINVAL;
4816         }
4817
4818         if (ctrl_info->max_inbound_iu_length_per_firmware <
4819                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4820                 dev_err(&ctrl_info->pci_dev->dev,
4821                         "max. inbound IU length of %u is less than the min. required length of %d\n",
4822                         ctrl_info->max_inbound_iu_length_per_firmware,
4823                         PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4824                 return -EINVAL;
4825         }
4826
4827         if (!ctrl_info->inbound_spanning_supported) {
4828                 dev_err(&ctrl_info->pci_dev->dev,
4829                         "the controller does not support inbound spanning\n");
4830                 return -EINVAL;
4831         }
4832
4833         if (ctrl_info->outbound_spanning_supported) {
4834                 dev_err(&ctrl_info->pci_dev->dev,
4835                         "the controller supports outbound spanning but this driver does not\n");
4836                 return -EINVAL;
4837         }
4838
4839         return 0;
4840 }
4841
4842 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4843 {
4844         int rc;
4845         struct pqi_event_queue *event_queue;
4846         struct pqi_general_admin_request request;
4847         struct pqi_general_admin_response response;
4848
4849         event_queue = &ctrl_info->event_queue;
4850
4851         /*
4852          * Create the OQ (Outbound Queue - device to host queue)
4853          * dedicated to events.
4854          */
4855         memset(&request, 0, sizeof(request));
4856         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4857         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4858                 &request.header.iu_length);
4859         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4860         put_unaligned_le16(event_queue->oq_id,
4861                 &request.data.create_operational_oq.queue_id);
4862         put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4863                 &request.data.create_operational_oq.element_array_addr);
4864         put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4865                 &request.data.create_operational_oq.pi_addr);
4866         put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4867                 &request.data.create_operational_oq.num_elements);
4868         put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4869                 &request.data.create_operational_oq.element_length);
4870         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4871         put_unaligned_le16(event_queue->int_msg_num,
4872                 &request.data.create_operational_oq.int_msg_num);
4873
4874         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4875                 &response);
4876         if (rc)
4877                 return rc;
4878
4879         event_queue->oq_ci = ctrl_info->iomem_base +
4880                 PQI_DEVICE_REGISTERS_OFFSET +
4881                 get_unaligned_le64(
4882                         &response.data.create_operational_oq.oq_ci_offset);
4883
4884         return 0;
4885 }
4886
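/*
 * Bring up one queue group in four admin steps: create the RAID-path IQ,
 * create the AIO-path IQ, change the second IQ's property so firmware
 * treats it as an AIO queue, then create the single OQ on which
 * completions for both paths are posted.
 */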
4887 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4888         unsigned int group_number)
4889 {
4890         int rc;
4891         struct pqi_queue_group *queue_group;
4892         struct pqi_general_admin_request request;
4893         struct pqi_general_admin_response response;
4894
4895         queue_group = &ctrl_info->queue_groups[group_number];
4896
4897         /*
4898          * Create IQ (Inbound Queue - host to device queue) for
4899          * RAID path.
4900          */
4901         memset(&request, 0, sizeof(request));
4902         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4903         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4904                 &request.header.iu_length);
4905         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4906         put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4907                 &request.data.create_operational_iq.queue_id);
4908         put_unaligned_le64(
4909                 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4910                 &request.data.create_operational_iq.element_array_addr);
4911         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4912                 &request.data.create_operational_iq.ci_addr);
4913         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4914                 &request.data.create_operational_iq.num_elements);
4915         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4916                 &request.data.create_operational_iq.element_length);
4917         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4918
4919         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4920                 &response);
4921         if (rc) {
4922                 dev_err(&ctrl_info->pci_dev->dev,
4923                         "error creating inbound RAID queue\n");
4924                 return rc;
4925         }
4926
4927         queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4928                 PQI_DEVICE_REGISTERS_OFFSET +
4929                 get_unaligned_le64(
4930                         &response.data.create_operational_iq.iq_pi_offset);
4931
4932         /*
4933          * Create IQ (Inbound Queue - host to device queue) for
4934          * Advanced I/O (AIO) path.
4935          */
4936         memset(&request, 0, sizeof(request));
4937         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4938         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4939                 &request.header.iu_length);
4940         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4941         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4942                 &request.data.create_operational_iq.queue_id);
4943         put_unaligned_le64(
4944                 (u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
4945                 &request.data.create_operational_iq.element_array_addr);
4946         put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4947                 &request.data.create_operational_iq.ci_addr);
4948         put_unaligned_le16(ctrl_info->num_elements_per_iq,
4949                 &request.data.create_operational_iq.num_elements);
4950         put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4951                 &request.data.create_operational_iq.element_length);
4952         request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4953
4954         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4955                 &response);
4956         if (rc) {
4957                 dev_err(&ctrl_info->pci_dev->dev,
4958                         "error creating inbound AIO queue\n");
4959                 return rc;
4960         }
4961
4962         queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4963                 PQI_DEVICE_REGISTERS_OFFSET +
4964                 get_unaligned_le64(
4965                         &response.data.create_operational_iq.iq_pi_offset);
4966
4967         /*
4968          * Designate the 2nd IQ as the AIO path.  By default, all IQs are
4969          * assumed to be for RAID path I/O unless we change the queue's
4970          * property.
4971          */
4972         memset(&request, 0, sizeof(request));
4973         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4974         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4975                 &request.header.iu_length);
4976         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4977         put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4978                 &request.data.change_operational_iq_properties.queue_id);
4979         put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4980                 &request.data.change_operational_iq_properties.vendor_specific);
4981
4982         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4983                 &response);
4984         if (rc) {
4985                 dev_err(&ctrl_info->pci_dev->dev,
4986                         "error changing queue property\n");
4987                 return rc;
4988         }
4989
4990         /*
4991          * Create OQ (Outbound Queue - device to host queue).
4992          */
4993         memset(&request, 0, sizeof(request));
4994         request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4995         put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4996                 &request.header.iu_length);
4997         request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4998         put_unaligned_le16(queue_group->oq_id,
4999                 &request.data.create_operational_oq.queue_id);
5000         put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
5001                 &request.data.create_operational_oq.element_array_addr);
5002         put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
5003                 &request.data.create_operational_oq.pi_addr);
5004         put_unaligned_le16(ctrl_info->num_elements_per_oq,
5005                 &request.data.create_operational_oq.num_elements);
5006         put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
5007                 &request.data.create_operational_oq.element_length);
5008         request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
5009         put_unaligned_le16(queue_group->int_msg_num,
5010                 &request.data.create_operational_oq.int_msg_num);
5011
5012         rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
5013                 &response);
5014         if (rc) {
5015                 dev_err(&ctrl_info->pci_dev->dev,
5016                         "error creating outbound queue\n");
5017                 return rc;
5018         }
5019
5020         queue_group->oq_ci = ctrl_info->iomem_base +
5021                 PQI_DEVICE_REGISTERS_OFFSET +
5022                 get_unaligned_le64(
5023                         &response.data.create_operational_oq.oq_ci_offset);
5024
5025         return 0;
5026 }
5027
5028 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
5029 {
5030         int rc;
5031         unsigned int i;
5032
5033         rc = pqi_create_event_queue(ctrl_info);
5034         if (rc) {
5035                 dev_err(&ctrl_info->pci_dev->dev,
5036                         "error creating event queue\n");
5037                 return rc;
5038         }
5039
5040         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5041                 rc = pqi_create_queue_group(ctrl_info, i);
5042                 if (rc) {
5043                         dev_err(&ctrl_info->pci_dev->dev,
5044                                 "error creating queue group number %u/%u\n",
5045                                 i, ctrl_info->num_queue_groups);
5046                         return rc;
5047                 }
5048         }
5049
5050         return 0;
5051 }
5052
5053 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH   \
5054         struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
5055
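/*
 * Read the controller's current event configuration, point each supported
 * event type at the dedicated event OQ (or clear the OQ ID when disabling),
 * and write the configuration back.
 */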
5056 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5057         bool enable_events)
5058 {
5059         int rc;
5060         unsigned int i;
5061         struct pqi_event_config *event_config;
5062         struct pqi_event_descriptor *event_descriptor;
5063         struct pqi_general_management_request request;
5064
5065         event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5066                 GFP_KERNEL);
5067         if (!event_config)
5068                 return -ENOMEM;
5069
5070         memset(&request, 0, sizeof(request));
5071
5072         request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5073         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5074                 data.report_event_configuration.sg_descriptors[1]) -
5075                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5076         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5077                 &request.data.report_event_configuration.buffer_length);
5078
5079         rc = pqi_map_single(ctrl_info->pci_dev,
5080                 request.data.report_event_configuration.sg_descriptors,
5081                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5082                 DMA_FROM_DEVICE);
5083         if (rc)
5084                 goto out;
5085
5086         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5087
5088         pqi_pci_unmap(ctrl_info->pci_dev,
5089                 request.data.report_event_configuration.sg_descriptors, 1,
5090                 DMA_FROM_DEVICE);
5091
5092         if (rc)
5093                 goto out;
5094
5095         for (i = 0; i < event_config->num_event_descriptors; i++) {
5096                 event_descriptor = &event_config->descriptors[i];
5097                 if (enable_events &&
5098                         pqi_is_supported_event(event_descriptor->event_type))
5099                         put_unaligned_le16(ctrl_info->event_queue.oq_id,
5100                                 &event_descriptor->oq_id);
5101                 else
5102                         put_unaligned_le16(0, &event_descriptor->oq_id);
5103         }
5104
5105         memset(&request, 0, sizeof(request));
5106
5107         request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5108         put_unaligned_le16(offsetof(struct pqi_general_management_request,
5109                 data.report_event_configuration.sg_descriptors[1]) -
5110                 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5111         put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5112                 &request.data.report_event_configuration.buffer_length);
5113
5114         rc = pqi_map_single(ctrl_info->pci_dev,
5115                 request.data.report_event_configuration.sg_descriptors,
5116                 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5117                 DMA_TO_DEVICE);
5118         if (rc)
5119                 goto out;
5120
5121         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5122
5123         pqi_pci_unmap(ctrl_info->pci_dev,
5124                 request.data.report_event_configuration.sg_descriptors, 1,
5125                 DMA_TO_DEVICE);
5126
5127 out:
5128         kfree(event_config);
5129
5130         return rc;
5131 }
5132
5133 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5134 {
5135         return pqi_configure_events(ctrl_info, true);
5136 }
5137
5138 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5139 {
5140         unsigned int i;
5141         struct device *dev;
5142         size_t sg_chain_buffer_length;
5143         struct pqi_io_request *io_request;
5144
5145         if (!ctrl_info->io_request_pool)
5146                 return;
5147
5148         dev = &ctrl_info->pci_dev->dev;
5149         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5150         io_request = ctrl_info->io_request_pool;
5151
5152         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5153                 kfree(io_request->iu);
5154                 if (!io_request->sg_chain_buffer)
5155                         break;
5156                 dma_free_coherent(dev, sg_chain_buffer_length,
5157                         io_request->sg_chain_buffer,
5158                         io_request->sg_chain_buffer_dma_handle);
5159                 io_request++;
5160         }
5161
5162         kfree(ctrl_info->io_request_pool);
5163         ctrl_info->io_request_pool = NULL;
5164 }
5165
5166 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5167 {
5168         ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5169                                      ctrl_info->error_buffer_length,
5170                                      &ctrl_info->error_buffer_dma_handle,
5171                                      GFP_KERNEL);
5172         if (!ctrl_info->error_buffer)
5173                 return -ENOMEM;
5174
5175         return 0;
5176 }
5177
5178 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5179 {
5180         unsigned int i;
5181         void *sg_chain_buffer;
5182         size_t sg_chain_buffer_length;
5183         dma_addr_t sg_chain_buffer_dma_handle;
5184         struct device *dev;
5185         struct pqi_io_request *io_request;
5186
5187         ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5188                 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5189
5190         if (!ctrl_info->io_request_pool) {
5191                 dev_err(&ctrl_info->pci_dev->dev,
5192                         "failed to allocate I/O request pool\n");
5193                 goto error;
5194         }
5195
5196         dev = &ctrl_info->pci_dev->dev;
5197         sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5198         io_request = ctrl_info->io_request_pool;
5199
5200         for (i = 0; i < ctrl_info->max_io_slots; i++) {
5201                 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5202
5203                 if (!io_request->iu) {
5204                         dev_err(&ctrl_info->pci_dev->dev,
5205                                 "failed to allocate IU buffers\n");
5206                         goto error;
5207                 }
5208
5209                 sg_chain_buffer = dma_alloc_coherent(dev,
5210                         sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5211                         GFP_KERNEL);
5212
5213                 if (!sg_chain_buffer) {
5214                         dev_err(&ctrl_info->pci_dev->dev,
5215                                 "failed to allocate PQI scatter-gather chain buffers\n");
5216                         goto error;
5217                 }
5218
5219                 io_request->index = i;
5220                 io_request->sg_chain_buffer = sg_chain_buffer;
5221                 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5222                 io_request++;
5223         }
5224
5225         return 0;
5226
5227 error:
5228         pqi_free_all_io_requests(ctrl_info);
5229
5230         return -ENOMEM;
5231 }
5232
5233 /*
5234  * Calculate required resources that are sized based on max. outstanding
5235  * requests and max. transfer size.
5236  */
5237
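/*
 * Worked example, assuming 4 KiB pages and a 1 MiB max transfer:
 * 1 MiB / 4 KiB = 256 SG entries, plus 1 for a non-page-aligned buffer
 * = 257; the usable max transfer is then recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE = 1 MiB, so the alignment reserve is
 * never counted as payload.
 */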
5238 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5239 {
5240         u32 max_transfer_size;
5241         u32 max_sg_entries;
5242
5243         ctrl_info->scsi_ml_can_queue =
5244                 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5245         ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5246
5247         ctrl_info->error_buffer_length =
5248                 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5249
5250         if (reset_devices)
5251                 max_transfer_size = min(ctrl_info->max_transfer_size,
5252                         PQI_MAX_TRANSFER_SIZE_KDUMP);
5253         else
5254                 max_transfer_size = min(ctrl_info->max_transfer_size,
5255                         PQI_MAX_TRANSFER_SIZE);
5256
5257         max_sg_entries = max_transfer_size / PAGE_SIZE;
5258
5259         /* +1 to cover when the buffer is not page-aligned. */
5260         max_sg_entries++;
5261
5262         max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5263
5264         max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5265
5266         ctrl_info->sg_chain_buffer_length =
5267                 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5268                 PQI_EXTRA_SGL_MEMORY;
5269         ctrl_info->sg_tablesize = max_sg_entries;
5270         ctrl_info->max_sectors = max_transfer_size / 512;
5271 }
5272
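/*
 * Pick one queue group per online CPU, bounded by the controller's MSI-X,
 * IQ, and OQ limits (a single group when booted with reset_devices, i.e.
 * kdump).  The max inbound IU length is rounded down to a whole number of
 * IQ elements, and each queue gets one extra element because a ring with
 * pi == ci reads as empty, leaving one slot permanently unusable.
 */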
5273 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5274 {
5275         int num_queue_groups;
5276         u16 num_elements_per_iq;
5277         u16 num_elements_per_oq;
5278
5279         if (reset_devices) {
5280                 num_queue_groups = 1;
5281         } else {
5282                 int num_cpus;
5283                 int max_queue_groups;
5284
5285                 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5286                         ctrl_info->max_outbound_queues - 1);
5287                 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5288
5289                 num_cpus = num_online_cpus();
5290                 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5291                 num_queue_groups = min(num_queue_groups, max_queue_groups);
5292         }
5293
5294         ctrl_info->num_queue_groups = num_queue_groups;
5295
5296         /*
5297          * Make sure that the max. inbound IU length is an even multiple
5298          * of our inbound element length.
5299          */
5300         ctrl_info->max_inbound_iu_length =
5301                 (ctrl_info->max_inbound_iu_length_per_firmware /
5302                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5303                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5304
5305         num_elements_per_iq =
5306                 (ctrl_info->max_inbound_iu_length /
5307                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5308
5309         /* Add one because one element in each queue is unusable. */
5310         num_elements_per_iq++;
5311
5312         num_elements_per_iq = min(num_elements_per_iq,
5313                 ctrl_info->max_elements_per_iq);
5314
5315         num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5316         num_elements_per_oq = min(num_elements_per_oq,
5317                 ctrl_info->max_elements_per_oq);
5318
5319         ctrl_info->num_elements_per_iq = num_elements_per_iq;
5320         ctrl_info->num_elements_per_oq = num_elements_per_oq;
5321
5322         ctrl_info->max_sg_per_iu =
5323                 ((ctrl_info->max_inbound_iu_length -
5324                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5325                 sizeof(struct pqi_sg_descriptor)) +
5326                 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5327
5328         ctrl_info->max_sg_per_r56_iu =
5329                 ((ctrl_info->max_inbound_iu_length -
5330                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5331                 sizeof(struct pqi_sg_descriptor)) +
5332                 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5333 }
5334
5335 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5336         struct scatterlist *sg)
5337 {
5338         u64 address = (u64)sg_dma_address(sg);
5339         unsigned int length = sg_dma_len(sg);
5340
5341         put_unaligned_le64(address, &sg_descriptor->address);
5342         put_unaligned_le32(length, &sg_descriptor->length);
5343         put_unaligned_le32(0, &sg_descriptor->flags);
5344 }
5345
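/*
 * Fill the IU's embedded SG descriptors from the mapped scatterlist.  When
 * the embedded slots run out, the last slot becomes a CISS_SG_CHAIN
 * descriptor pointing at the per-request DMA chain buffer and the remaining
 * entries continue there; the returned count covers only descriptors that
 * occupy space in the IU itself.
 */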
5346 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5347         struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5348         int max_sg_per_iu, bool *chained)
5349 {
5350         int i;
5351         unsigned int num_sg_in_iu;
5352
5353         *chained = false;
5354         i = 0;
5355         num_sg_in_iu = 0;
5356         max_sg_per_iu--;        /* Subtract 1 to leave room for chain marker. */
5357
5358         while (1) {
5359                 pqi_set_sg_descriptor(sg_descriptor, sg);
5360                 if (!*chained)
5361                         num_sg_in_iu++;
5362                 i++;
5363                 if (i == sg_count)
5364                         break;
5365                 sg_descriptor++;
5366                 if (i == max_sg_per_iu) {
5367                         put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5368                                 &sg_descriptor->address);
5369                         put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5370                                 &sg_descriptor->length);
5371                         put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5372                         *chained = true;
5373                         num_sg_in_iu++;
5374                         sg_descriptor = io_request->sg_chain_buffer;
5375                 }
5376                 sg = sg_next(sg);
5377         }
5378
5379         put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5380
5381         return num_sg_in_iu;
5382 }
5383
5384 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5385         struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5386         struct pqi_io_request *io_request)
5387 {
5388         u16 iu_length;
5389         int sg_count;
5390         bool chained;
5391         unsigned int num_sg_in_iu;
5392         struct scatterlist *sg;
5393         struct pqi_sg_descriptor *sg_descriptor;
5394
5395         sg_count = scsi_dma_map(scmd);
5396         if (sg_count < 0)
5397                 return sg_count;
5398
5399         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5400                 PQI_REQUEST_HEADER_LENGTH;
5401
5402         if (sg_count == 0)
5403                 goto out;
5404
5405         sg = scsi_sglist(scmd);
5406         sg_descriptor = request->sg_descriptors;
5407
5408         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5409                 ctrl_info->max_sg_per_iu, &chained);
5410
5411         request->partial = chained;
5412         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5413
5414 out:
5415         put_unaligned_le16(iu_length, &request->header.iu_length);
5416
5417         return 0;
5418 }
5419
5420 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5421         struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5422         struct pqi_io_request *io_request)
5423 {
5424         u16 iu_length;
5425         int sg_count;
5426         bool chained;
5427         unsigned int num_sg_in_iu;
5428         struct scatterlist *sg;
5429         struct pqi_sg_descriptor *sg_descriptor;
5430
5431         sg_count = scsi_dma_map(scmd);
5432         if (sg_count < 0)
5433                 return sg_count;
5434
5435         iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5436                 PQI_REQUEST_HEADER_LENGTH;
5437         num_sg_in_iu = 0;
5438
5439         if (sg_count == 0)
5440                 goto out;
5441
5442         sg = scsi_sglist(scmd);
5443         sg_descriptor = request->sg_descriptors;
5444
5445         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5446                 ctrl_info->max_sg_per_iu, &chained);
5447
5448         request->partial = chained;
5449         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5450
5451 out:
5452         put_unaligned_le16(iu_length, &request->header.iu_length);
5453         request->num_sg_descriptors = num_sg_in_iu;
5454
5455         return 0;
5456 }
5457
5458 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5459         struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5460         struct pqi_io_request *io_request)
5461 {
5462         u16 iu_length;
5463         int sg_count;
5464         bool chained;
5465         unsigned int num_sg_in_iu;
5466         struct scatterlist *sg;
5467         struct pqi_sg_descriptor *sg_descriptor;
5468
5469         sg_count = scsi_dma_map(scmd);
5470         if (sg_count < 0)
5471                 return sg_count;
5472
5473         iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5474                 PQI_REQUEST_HEADER_LENGTH;
5475         num_sg_in_iu = 0;
5476
5477         if (sg_count != 0) {
5478                 sg = scsi_sglist(scmd);
5479                 sg_descriptor = request->sg_descriptors;
5480
5481                 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5482                         ctrl_info->max_sg_per_r56_iu, &chained);
5483
5484                 request->partial = chained;
5485                 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5486         }
5487
5488         put_unaligned_le16(iu_length, &request->header.iu_length);
5489         request->num_sg_descriptors = num_sg_in_iu;
5490
5491         return 0;
5492 }
5493
5494 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5495         struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5496         struct pqi_io_request *io_request)
5497 {
5498         u16 iu_length;
5499         int sg_count;
5500         bool chained;
5501         unsigned int num_sg_in_iu;
5502         struct scatterlist *sg;
5503         struct pqi_sg_descriptor *sg_descriptor;
5504
5505         sg_count = scsi_dma_map(scmd);
5506         if (sg_count < 0)
5507                 return sg_count;
5508
5509         iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5510                 PQI_REQUEST_HEADER_LENGTH;
5511         num_sg_in_iu = 0;
5512
5513         if (sg_count == 0)
5514                 goto out;
5515
5516         sg = scsi_sglist(scmd);
5517         sg_descriptor = request->sg_descriptors;
5518
5519         num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5520                 ctrl_info->max_sg_per_iu, &chained);
5521
5522         request->partial = chained;
5523         iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5524
5525 out:
5526         put_unaligned_le16(iu_length, &request->header.iu_length);
5527         request->num_sg_descriptors = num_sg_in_iu;
5528
5529         return 0;
5530 }
5531
5532 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5533         void *context)
5534 {
5535         struct scsi_cmnd *scmd;
5536
5537         scmd = io_request->scmd;
5538         pqi_free_io_request(io_request);
5539         scsi_dma_unmap(scmd);
5540         pqi_scsi_done(scmd);
5541 }
5542
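/*
 * Build a RAID-path request for scmd and hand it to the controller.
 * Returns SCSI_MLQUEUE_HOST_BUSY if no io_request slot is free or the
 * SG list cannot be built.
 */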
5543 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5544         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5545         struct pqi_queue_group *queue_group, bool io_high_prio)
5546 {
5547         int rc;
5548         size_t cdb_length;
5549         struct pqi_io_request *io_request;
5550         struct pqi_raid_path_request *request;
5551
5552         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5553         if (!io_request)
5554                 return SCSI_MLQUEUE_HOST_BUSY;
5555
5556         io_request->io_complete_callback = pqi_raid_io_complete;
5557         io_request->scmd = scmd;
5558
5559         request = io_request->iu;
5560         memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5561
5562         request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5563         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5564         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5565         request->command_priority = io_high_prio;
5566         put_unaligned_le16(io_request->index, &request->request_id);
5567         request->error_index = request->request_id;
5568         memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5569         request->ml_device_lun_number = (u8)scmd->device->lun;
5570
5571         cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5572         memcpy(request->cdb, scmd->cmnd, cdb_length);
5573
5574         switch (cdb_length) {
5575         case 6:
5576         case 10:
5577         case 12:
5578         case 16:
5579                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5580                 break;
5581         case 20:
5582                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5583                 break;
5584         case 24:
5585                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5586                 break;
5587         case 28:
5588                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5589                 break;
5590         case 32:
5591         default:
5592                 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5593                 break;
5594         }
5595
5596         switch (scmd->sc_data_direction) {
5597         case DMA_FROM_DEVICE:
5598                 request->data_direction = SOP_READ_FLAG;
5599                 break;
5600         case DMA_TO_DEVICE:
5601                 request->data_direction = SOP_WRITE_FLAG;
5602                 break;
5603         case DMA_NONE:
5604                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5605                 break;
5606         case DMA_BIDIRECTIONAL:
5607                 request->data_direction = SOP_BIDIRECTIONAL;
5608                 break;
5609         default:
5610                 dev_err(&ctrl_info->pci_dev->dev,
5611                         "unknown data direction: %d\n",
5612                         scmd->sc_data_direction);
5613                 break;
5614         }
5615
5616         rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5617         if (rc) {
5618                 pqi_free_io_request(io_request);
5619                 return SCSI_MLQUEUE_HOST_BUSY;
5620         }
5621
5622         pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5623
5624         return 0;
5625 }
5626
5627 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5628         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5629         struct pqi_queue_group *queue_group)
5630 {
5631         bool io_high_prio;
5632
5633         io_high_prio = pqi_is_io_high_priority(device, scmd);
5634
5635         return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5636 }
5637
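/*
 * Decide whether a failed RAID-bypass request should be retried down the
 * normal RAID path: only if the command actually failed and the device
 * and controller are still usable.
 */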
5638 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5639 {
5640         struct scsi_cmnd *scmd;
5641         struct pqi_scsi_dev *device;
5642         struct pqi_ctrl_info *ctrl_info;
5643
5644         if (!io_request->raid_bypass)
5645                 return false;
5646
5647         scmd = io_request->scmd;
5648         if ((scmd->result & 0x7f) == SAM_STAT_GOOD)
5649                 return false;
5650         if (host_byte(scmd->result) == DID_NO_CONNECT)
5651                 return false;
5652
5653         device = scmd->device->hostdata;
5654         if (pqi_device_offline(device) || pqi_device_in_remove(device))
5655                 return false;
5656
5657         ctrl_info = shost_to_hba(scmd->device->host);
5658         if (pqi_ctrl_offline(ctrl_info))
5659                 return false;
5660
5661         return true;
5662 }
5663
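/*
 * DID_IMM_RETRY causes the SML to resubmit the command, and the bumped
 * this_residual count makes it ineligible for another bypass attempt,
 * so the retry is sent down the RAID path.
 */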
5664 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5665         void *context)
5666 {
5667         struct scsi_cmnd *scmd;
5668
5669         scmd = io_request->scmd;
5670         scsi_dma_unmap(scmd);
5671         if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5672                 set_host_byte(scmd, DID_IMM_RETRY);
5673                 pqi_cmd_priv(scmd)->this_residual++;
5674         }
5675
5676         pqi_free_io_request(io_request);
5677         pqi_scsi_done(scmd);
5678 }
5679
5680 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5681         struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5682         struct pqi_queue_group *queue_group)
5683 {
5684         bool io_high_prio;
5685
5686         io_high_prio = pqi_is_io_high_priority(device, scmd);
5687
5688         return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5689                 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5690                 false, io_high_prio);
5691 }
5692
5693 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5694         struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5695         unsigned int cdb_length, struct pqi_queue_group *queue_group,
5696         struct pqi_encryption_info *encryption_info, bool raid_bypass,
5697         bool io_high_prio)
5698 {
5699         int rc;
5700         struct pqi_io_request *io_request;
5701         struct pqi_aio_path_request *request;
5702
5703         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5704         if (!io_request)
5705                 return SCSI_MLQUEUE_HOST_BUSY;
5706
5707         io_request->io_complete_callback = pqi_aio_io_complete;
5708         io_request->scmd = scmd;
5709         io_request->raid_bypass = raid_bypass;
5710
5711         request = io_request->iu;
5712         memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5713
5714         request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5715         put_unaligned_le32(aio_handle, &request->nexus_id);
5716         put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5717         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5718         request->command_priority = io_high_prio;
5719         put_unaligned_le16(io_request->index, &request->request_id);
5720         request->error_index = request->request_id;
5721         if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5722                 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5723         if (cdb_length > sizeof(request->cdb))
5724                 cdb_length = sizeof(request->cdb);
5725         request->cdb_length = cdb_length;
5726         memcpy(request->cdb, cdb, cdb_length);
5727
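        /*
         * Note: unlike the RAID path above, the AIO IU direction appears
         * to be expressed from the drive's point of view: a host write
         * maps to SOP_READ_FLAG, a host read to SOP_WRITE_FLAG.
         */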
5728         switch (scmd->sc_data_direction) {
5729         case DMA_TO_DEVICE:
5730                 request->data_direction = SOP_READ_FLAG;
5731                 break;
5732         case DMA_FROM_DEVICE:
5733                 request->data_direction = SOP_WRITE_FLAG;
5734                 break;
5735         case DMA_NONE:
5736                 request->data_direction = SOP_NO_DIRECTION_FLAG;
5737                 break;
5738         case DMA_BIDIRECTIONAL:
5739                 request->data_direction = SOP_BIDIRECTIONAL;
5740                 break;
5741         default:
5742                 dev_err(&ctrl_info->pci_dev->dev,
5743                         "unknown data direction: %d\n",
5744                         scmd->sc_data_direction);
5745                 break;
5746         }
5747
5748         if (encryption_info) {
5749                 request->encryption_enable = true;
5750                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5751                         &request->data_encryption_key_index);
5752                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5753                         &request->encrypt_tweak_lower);
5754                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5755                         &request->encrypt_tweak_upper);
5756         }
5757
5758         rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5759         if (rc) {
5760                 pqi_free_io_request(io_request);
5761                 return SCSI_MLQUEUE_HOST_BUSY;
5762         }
5763
5764         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5765
5766         return 0;
5767 }
5768
5769 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5770         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5771         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5772         struct pqi_scsi_dev_raid_map_data *rmd)
5773 {
5774         int rc;
5775         struct pqi_io_request *io_request;
5776         struct pqi_aio_r1_path_request *r1_request;
5777
5778         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5779         if (!io_request)
5780                 return SCSI_MLQUEUE_HOST_BUSY;
5781
5782         io_request->io_complete_callback = pqi_aio_io_complete;
5783         io_request->scmd = scmd;
5784         io_request->raid_bypass = true;
5785
5786         r1_request = io_request->iu;
5787         memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5788
5789         r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5790         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5791         r1_request->num_drives = rmd->num_it_nexus_entries;
5792         put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5793         put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5794         if (rmd->num_it_nexus_entries == 3)
5795                 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5796
5797         put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5798         r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5799         put_unaligned_le16(io_request->index, &r1_request->request_id);
5800         r1_request->error_index = r1_request->request_id;
5801         if (rmd->cdb_length > sizeof(r1_request->cdb))
5802                 rmd->cdb_length = sizeof(r1_request->cdb);
5803         r1_request->cdb_length = rmd->cdb_length;
5804         memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5805
5806         /* Always a write: SOP_READ_FLAG means the drive reads the data from host memory. */
5807         r1_request->data_direction = SOP_READ_FLAG;
5808
5809         if (encryption_info) {
5810                 r1_request->encryption_enable = true;
5811                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5812                                 &r1_request->data_encryption_key_index);
5813                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5814                                 &r1_request->encrypt_tweak_lower);
5815                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5816                                 &r1_request->encrypt_tweak_upper);
5817         }
5818
5819         rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5820         if (rc) {
5821                 pqi_free_io_request(io_request);
5822                 return SCSI_MLQUEUE_HOST_BUSY;
5823         }
5824
5825         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5826
5827         return 0;
5828 }
5829
5830 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5831         struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5832         struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5833         struct pqi_scsi_dev_raid_map_data *rmd)
5834 {
5835         int rc;
5836         struct pqi_io_request *io_request;
5837         struct pqi_aio_r56_path_request *r56_request;
5838
5839         io_request = pqi_alloc_io_request(ctrl_info, scmd);
5840         if (!io_request)
5841                 return SCSI_MLQUEUE_HOST_BUSY;
5842         io_request->io_complete_callback = pqi_aio_io_complete;
5843         io_request->scmd = scmd;
5844         io_request->raid_bypass = true;
5845
5846         r56_request = io_request->iu;
5847         memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5848
5849         if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5850                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5851         else
5852                 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5853
5854         put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5855         put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5856         put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5857         if (rmd->raid_level == SA_RAID_6) {
5858                 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5859                 r56_request->xor_multiplier = rmd->xor_mult;
5860         }
5861         put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5862         r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5863         put_unaligned_le64(rmd->row, &r56_request->row);
5864
5865         put_unaligned_le16(io_request->index, &r56_request->request_id);
5866         r56_request->error_index = r56_request->request_id;
5867
5868         if (rmd->cdb_length > sizeof(r56_request->cdb))
5869                 rmd->cdb_length = sizeof(r56_request->cdb);
5870         r56_request->cdb_length = rmd->cdb_length;
5871         memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5872
5873         /* Always a write: SOP_READ_FLAG means the drive reads the data from host memory. */
5874         r56_request->data_direction = SOP_READ_FLAG;
5875
5876         if (encryption_info) {
5877                 r56_request->encryption_enable = true;
5878                 put_unaligned_le16(encryption_info->data_encryption_key_index,
5879                                 &r56_request->data_encryption_key_index);
5880                 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5881                                 &r56_request->encrypt_tweak_lower);
5882                 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5883                                 &r56_request->encrypt_tweak_upper);
5884         }
5885
5886         rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5887         if (rc) {
5888                 pqi_free_io_request(io_request);
5889                 return SCSI_MLQUEUE_HOST_BUSY;
5890         }
5891
5892         pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5893
5894         return 0;
5895 }
5896
5897 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5898         struct scsi_cmnd *scmd)
5899 {
5900         /*
5901          * host_tagset = 1 is set during init, so the unique tag encodes the hw queue.
5902          */
5903         return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5904 }
5905
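/*
 * Passthrough requests and commands already retried after a bypass
 * failure (this_residual != 0) are not eligible for RAID bypass.
 */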
5906 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5907 {
5908         if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5909                 return false;
5910
5911         return pqi_cmd_priv(scmd)->this_residual == 0;
5912 }
5913
5914 /*
5915  * This function is called just before a completed SCSI request is handed
5916  * back to the SCSI midlayer (SML).
5917  */
5918
5919 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5920 {
5921         struct pqi_scsi_dev *device;
5922         struct completion *wait;
5923
5924         if (!scmd->device) {
5925                 set_host_byte(scmd, DID_NO_CONNECT);
5926                 return;
5927         }
5928
5929         device = scmd->device->hostdata;
5930         if (!device) {
5931                 set_host_byte(scmd, DID_NO_CONNECT);
5932                 return;
5933         }
5934
5935         atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5936
5937         wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5938         if (wait != PQI_NO_COMPLETION)
5939                 complete(wait);
5940 }
5941
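/*
 * Detect sequential write streams to RAID 5/6 volumes. Returning true
 * sends the write down the RAID path, presumably so the controller can
 * coalesce full-stripe parity writes; up to NUM_STREAMS_PER_LUN streams
 * are tracked per device in a small LRU array.
 */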
5942 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5943         struct scsi_cmnd *scmd)
5944 {
5945         u32 oldest_jiffies;
5946         u8 lru_index;
5947         int i;
5948         int rc;
5949         struct pqi_scsi_dev *device;
5950         struct pqi_stream_data *pqi_stream_data;
5951         struct pqi_scsi_dev_raid_map_data rmd = { 0 };
5952
5953         if (!ctrl_info->enable_stream_detection)
5954                 return false;
5955
5956         rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5957         if (rc)
5958                 return false;
5959
5960         /* Check writes only. */
5961         if (!rmd.is_write)
5962                 return false;
5963
5964         device = scmd->device->hostdata;
5965
5966         /* Check for RAID 5/6 streams. */
5967         if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5968                 return false;
5969
5970         /*
5971          * If the controller does not support AIO RAID{5,6} writes, these
5972          * requests must be sent down the non-AIO path.
5973          */
5974         if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5975                 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5976                 return true;
5977
5978         lru_index = 0;
5979         oldest_jiffies = INT_MAX;
5980         for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5981                 pqi_stream_data = &device->stream_data[i];
5982                 /*
5983                  * Check whether this request is adjacent to, or falls
5984                  * within, the previous request.
5985                  */
5986                 if ((pqi_stream_data->next_lba &&
5987                         rmd.first_block >= pqi_stream_data->next_lba) &&
5988                         rmd.first_block <= pqi_stream_data->next_lba +
5989                                 rmd.block_cnt) {
5990                         pqi_stream_data->next_lba = rmd.first_block +
5991                                 rmd.block_cnt;
5992                         pqi_stream_data->last_accessed = jiffies;
5993                         per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
5994                         return true;
5995                 }
5996
5997                 /* Unused entry: claim it. */
5998                 if (pqi_stream_data->last_accessed == 0) {
5999                         lru_index = i;
6000                         break;
6001                 }
6002
6003                 /* Find entry with oldest last accessed time. */
6004                 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
6005                         oldest_jiffies = pqi_stream_data->last_accessed;
6006                         lru_index = i;
6007                 }
6008         }
6009
6010         /* No existing stream matched: track this one in the LRU entry. */
6011         pqi_stream_data = &device->stream_data[lru_index];
6012         pqi_stream_data->last_accessed = jiffies;
6013         pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
6014
6015         return false;
6016 }
6017
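/*
 * Main .queuecommand entry point: pick the hw queue, then submit the
 * command via RAID bypass (eligible logical volumes), the RAID path, or
 * the AIO path for AIO-enabled physical devices.
 */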
6018 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6019 {
6020         int rc;
6021         struct pqi_ctrl_info *ctrl_info;
6022         struct pqi_scsi_dev *device;
6023         u16 hw_queue;
6024         struct pqi_queue_group *queue_group;
6025         bool raid_bypassed;
6026         u8 lun;
6027
6028         scmd->host_scribble = PQI_NO_COMPLETION;
6029
6030         device = scmd->device->hostdata;
6031
6032         if (!device) {
6033                 set_host_byte(scmd, DID_NO_CONNECT);
6034                 pqi_scsi_done(scmd);
6035                 return 0;
6036         }
6037
6038         lun = (u8)scmd->device->lun;
6039
6040         atomic_inc(&device->scsi_cmds_outstanding[lun]);
6041
6042         ctrl_info = shost_to_hba(shost);
6043
6044         if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
6045                 set_host_byte(scmd, DID_NO_CONNECT);
6046                 pqi_scsi_done(scmd);
6047                 return 0;
6048         }
6049
6050         if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6051                 rc = SCSI_MLQUEUE_HOST_BUSY;
6052                 goto out;
6053         }
6054
6055         /*
6056          * This is necessary because the SML doesn't zero out this field during
6057          * error recovery.
6058          */
6059         scmd->result = 0;
6060
6061         hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6062         queue_group = &ctrl_info->queue_groups[hw_queue];
6063
6064         if (pqi_is_logical_device(device)) {
6065                 raid_bypassed = false;
6066                 if (device->raid_bypass_enabled &&
6067                         pqi_is_bypass_eligible_request(scmd) &&
6068                         !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6069                         rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6070                         if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6071                                 raid_bypassed = true;
6072                                 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
6073                         }
6074                 }
6075                 if (!raid_bypassed)
6076                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6077         } else {
6078                 if (device->aio_enabled)
6079                         rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6080                 else
6081                         rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6082         }
6083
6084 out:
6085         if (rc) {
6086                 scmd->host_scribble = NULL;
6087                 atomic_dec(&device->scsi_cmds_outstanding[lun]);
6088         }
6089
6090         return rc;
6091 }
6092
6093 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6094 {
6095         unsigned int i;
6096         unsigned int path;
6097         unsigned long flags;
6098         unsigned int queued_io_count;
6099         struct pqi_queue_group *queue_group;
6100         struct pqi_io_request *io_request;
6101
6102         queued_io_count = 0;
6103
6104         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6105                 queue_group = &ctrl_info->queue_groups[i];
6106                 for (path = 0; path < 2; path++) {
6107                         spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6108                         list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6109                                 queued_io_count++;
6110                         spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6111                 }
6112         }
6113
6114         return queued_io_count;
6115 }
6116
6117 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6118 {
6119         unsigned int i;
6120         unsigned int path;
6121         unsigned int nonempty_inbound_queue_count;
6122         struct pqi_queue_group *queue_group;
6123         pqi_index_t iq_pi;
6124         pqi_index_t iq_ci;
6125
6126         nonempty_inbound_queue_count = 0;
6127
6128         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6129                 queue_group = &ctrl_info->queue_groups[i];
6130                 for (path = 0; path < 2; path++) {
6131                         iq_pi = queue_group->iq_pi_copy[path];
6132                         iq_ci = readl(queue_group->iq_ci[path]);
6133                         if (iq_ci != iq_pi)
6134                                 nonempty_inbound_queue_count++;
6135                 }
6136         }
6137
6138         return nonempty_inbound_queue_count;
6139 }
6140
6141 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS        10
6142
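/*
 * Wait for all driver-queued and controller inbound-queue I/O to drain,
 * warning every PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS seconds.
 */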
6143 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6144 {
6145         unsigned long start_jiffies;
6146         unsigned long warning_timeout;
6147         unsigned int queued_io_count;
6148         unsigned int nonempty_inbound_queue_count;
6149         bool displayed_warning;
6150
6151         displayed_warning = false;
6152         start_jiffies = jiffies;
6153         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6154
6155         while (1) {
6156                 queued_io_count = pqi_queued_io_count(ctrl_info);
6157                 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6158                 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6159                         break;
6160                 pqi_check_ctrl_health(ctrl_info);
6161                 if (pqi_ctrl_offline(ctrl_info))
6162                         return -ENXIO;
6163                 if (time_after(jiffies, warning_timeout)) {
6164                         dev_warn(&ctrl_info->pci_dev->dev,
6165                                 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6166                                 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6167                         displayed_warning = true;
6168                         warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6169                 }
6170                 usleep_range(1000, 2000);
6171         }
6172
6173         if (displayed_warning)
6174                 dev_warn(&ctrl_info->pci_dev->dev,
6175                         "queued I/O drained after waiting for %u seconds\n",
6176                         jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6177
6178         return 0;
6179 }
6180
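/*
 * Fail all requests still queued in the driver (not yet submitted to the
 * controller): DID_RESET for the LUN being reset, DID_REQUEUE for all
 * other devices.
 */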
6181 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6182         struct pqi_scsi_dev *device, u8 lun)
6183 {
6184         unsigned int i;
6185         unsigned int path;
6186         struct pqi_queue_group *queue_group;
6187         unsigned long flags;
6188         struct pqi_io_request *io_request;
6189         struct pqi_io_request *next;
6190         struct scsi_cmnd *scmd;
6191         struct pqi_scsi_dev *scsi_device;
6192
6193         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6194                 queue_group = &ctrl_info->queue_groups[i];
6195
6196                 for (path = 0; path < 2; path++) {
6197                         spin_lock_irqsave(
6198                                 &queue_group->submit_lock[path], flags);
6199
6200                         list_for_each_entry_safe(io_request, next,
6201                                 &queue_group->request_list[path],
6202                                 request_list_entry) {
6203
6204                                 scmd = io_request->scmd;
6205                                 if (!scmd)
6206                                         continue;
6207
6208                                 scsi_device = scmd->device->hostdata;
6209
6210                                 list_del(&io_request->request_list_entry);
6211                                 if (scsi_device == device && (u8)scmd->device->lun == lun)
6212                                         set_host_byte(scmd, DID_RESET);
6213                                 else
6214                                         set_host_byte(scmd, DID_REQUEUE);
6215                                 pqi_free_io_request(io_request);
6216                                 scsi_dma_unmap(scmd);
6217                                 pqi_scsi_done(scmd);
6218                         }
6219
6220                         spin_unlock_irqrestore(
6221                                 &queue_group->submit_lock[path], flags);
6222                 }
6223         }
6224 }
6225
6226 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS     10
6227
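/*
 * Poll until the LUN has no outstanding commands. Warns every
 * PQI_PENDING_IO_WARNING_TIMEOUT_SECS seconds and gives up with
 * -ETIMEDOUT after timeout_msecs.
 */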
6228 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6229         struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6230 {
6231         int cmds_outstanding;
6232         unsigned long start_jiffies;
6233         unsigned long warning_timeout;
6234         unsigned long msecs_waiting;
6235
6236         start_jiffies = jiffies;
6237         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6238
6239         while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6240                 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6241                         pqi_check_ctrl_health(ctrl_info);
6242                         if (pqi_ctrl_offline(ctrl_info))
6243                                 return -ENXIO;
6244                 }
6245                 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6246                 if (msecs_waiting >= timeout_msecs) {
6247                         dev_err(&ctrl_info->pci_dev->dev,
6248                                 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6249                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6250                                 lun, msecs_waiting / 1000, cmds_outstanding);
6251                         return -ETIMEDOUT;
6252                 }
6253                 if (time_after(jiffies, warning_timeout)) {
6254                         dev_warn(&ctrl_info->pci_dev->dev,
6255                                 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6256                                 ctrl_info->scsi_host->host_no, device->bus, device->target,
6257                                 lun, msecs_waiting / 1000, cmds_outstanding);
6258                         warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6259                 }
6260                 usleep_range(1000, 2000);
6261         }
6262
6263         return 0;
6264 }
6265
6266 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6267         void *context)
6268 {
6269         struct completion *waiting = context;
6270
6271         complete(waiting);
6272 }
6273
6274 #define PQI_LUN_RESET_POLL_COMPLETION_SECS      10
6275
6276 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6277         struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6278 {
6279         int rc;
6280         unsigned int wait_secs;
6281         int cmds_outstanding;
6282
6283         wait_secs = 0;
6284
6285         while (1) {
6286                 if (wait_for_completion_io_timeout(wait,
6287                         PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6288                         rc = 0;
6289                         break;
6290                 }
6291
6292                 pqi_check_ctrl_health(ctrl_info);
6293                 if (pqi_ctrl_offline(ctrl_info)) {
6294                         rc = -ENXIO;
6295                         break;
6296                 }
6297
6298                 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6299                 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6300                 dev_warn(&ctrl_info->pci_dev->dev,
6301                         "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6302                         ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6303         }
6304
6305         return rc;
6306 }
6307
6308 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS     30
6309
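/*
 * Issue a SOP LUN RESET task management IU for the device and wait for
 * the firmware to complete it.
 */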
6310 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6311 {
6312         int rc;
6313         struct pqi_io_request *io_request;
6314         DECLARE_COMPLETION_ONSTACK(wait);
6315         struct pqi_task_management_request *request;
6316
6317         io_request = pqi_alloc_io_request(ctrl_info, NULL);
6318         io_request->io_complete_callback = pqi_lun_reset_complete;
6319         io_request->context = &wait;
6320
6321         request = io_request->iu;
6322         memset(request, 0, sizeof(*request));
6323
6324         request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6325         put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6326                 &request->header.iu_length);
6327         put_unaligned_le16(io_request->index, &request->request_id);
6328         memcpy(request->lun_number, device->scsi3addr,
6329                 sizeof(request->lun_number));
6330         if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6331                 request->ml_device_lun_number = lun;
6332         request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6333         if (ctrl_info->tmf_iu_timeout_supported)
6334                 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6335
6336         pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6337                 io_request);
6338
6339         rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6340         if (rc == 0)
6341                 rc = io_request->status;
6342
6343         pqi_free_io_request(io_request);
6344
6345         return rc;
6346 }
6347
6348 #define PQI_LUN_RESET_RETRIES                           3
6349 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS              (10 * 1000)
6350 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS          (10 * 60 * 1000)
6351 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS   (2 * 60 * 1000)
6352
6353 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6354 {
6355         int reset_rc;
6356         int wait_rc;
6357         unsigned int retries;
6358         unsigned long timeout_msecs;
6359
6360         for (retries = 0;;) {
6361                 reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6362                 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6363                         break;
6364                 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6365         }
6366
6367         timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6368                 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6369
6370         wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6371         if (wait_rc && reset_rc == 0)
6372                 reset_rc = wait_rc;
6373
6374         return reset_rc == 0 ? SUCCESS : FAILED;
6375 }
6376
6377 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6378 {
6379         int rc;
6380
6381         pqi_ctrl_block_requests(ctrl_info);
6382         pqi_ctrl_wait_until_quiesced(ctrl_info);
6383         pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6384         rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6385         pqi_device_reset_start(device, lun);
6386         pqi_ctrl_unblock_requests(ctrl_info);
6387         if (rc)
6388                 rc = FAILED;
6389         else
6390                 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6391         pqi_device_reset_done(device, lun);
6392
6393         return rc;
6394 }
6395
6396 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6397 {
6398         int rc;
6399
6400         mutex_lock(&ctrl_info->lun_reset_mutex);
6401
6402         dev_err(&ctrl_info->pci_dev->dev,
6403                 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6404                 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6405
6406         pqi_check_ctrl_health(ctrl_info);
6407         if (pqi_ctrl_offline(ctrl_info))
6408                 rc = FAILED;
6409         else
6410                 rc = pqi_device_reset(ctrl_info, device, lun);
6411
6412         dev_err(&ctrl_info->pci_dev->dev,
6413                 "reset of scsi %d:%d:%d:%u: %s\n",
6414                 ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6415                 rc == SUCCESS ? "SUCCESS" : "FAILED");
6416
6417         mutex_unlock(&ctrl_info->lun_reset_mutex);
6418
6419         return rc;
6420 }
6421
6422 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6423 {
6424         struct Scsi_Host *shost;
6425         struct pqi_ctrl_info *ctrl_info;
6426         struct pqi_scsi_dev *device;
6427         u8 scsi_opcode;
6428
6429         shost = scmd->device->host;
6430         ctrl_info = shost_to_hba(shost);
6431         device = scmd->device->hostdata;
6432         scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6433
6434         return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6435 }
6436
6437 static void pqi_tmf_worker(struct work_struct *work)
6438 {
6439         struct pqi_tmf_work *tmf_work;
6440         struct scsi_cmnd *scmd;
6441
6442         tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6443         scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6444
6445         pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6446 }
6447
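/*
 * TASK ABORT is serviced by scheduling pqi_tmf_worker(), which resets
 * the LUN, and then waiting here for the aborted command to complete.
 */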
6448 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6449 {
6450         struct Scsi_Host *shost;
6451         struct pqi_ctrl_info *ctrl_info;
6452         struct pqi_scsi_dev *device;
6453         struct pqi_tmf_work *tmf_work;
6454         DECLARE_COMPLETION_ONSTACK(wait);
6455
6456         shost = scmd->device->host;
6457         ctrl_info = shost_to_hba(shost);
6458         device = scmd->device->hostdata;
6459
6460         dev_err(&ctrl_info->pci_dev->dev,
6461                 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6462                 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6463
6464         if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6465                 dev_err(&ctrl_info->pci_dev->dev,
6466                         "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6467                         shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6468                 scmd->result = DID_RESET << 16;
6469                 goto out;
6470         }
6471
6472         tmf_work = &device->tmf_work[scmd->device->lun];
6473
6474         if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6475                 tmf_work->ctrl_info = ctrl_info;
6476                 tmf_work->device = device;
6477                 tmf_work->lun = (u8)scmd->device->lun;
6478                 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6479                 schedule_work(&tmf_work->work_struct);
6480         }
6481
6482         wait_for_completion(&wait);
6483
6484         dev_err(&ctrl_info->pci_dev->dev,
6485                 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6486                 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6487
6488 out:
6489
6490         return SUCCESS;
6491 }
6492
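/*
 * Match the new scsi_device to the driver's internal device: physical
 * devices are looked up via their SAS rphy, logical devices by
 * channel/target/lun.
 */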
6493 static int pqi_slave_alloc(struct scsi_device *sdev)
6494 {
6495         struct pqi_scsi_dev *device;
6496         unsigned long flags;
6497         struct pqi_ctrl_info *ctrl_info;
6498         struct scsi_target *starget;
6499         struct sas_rphy *rphy;
6500
6501         ctrl_info = shost_to_hba(sdev->host);
6502
6503         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6504
6505         if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6506                 starget = scsi_target(sdev);
6507                 rphy = target_to_rphy(starget);
6508                 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6509                 if (device) {
6510                         if (device->target_lun_valid) {
6511                                 device->ignore_device = true;
6512                         } else {
6513                                 device->target = sdev_id(sdev);
6514                                 device->lun = sdev->lun;
6515                                 device->target_lun_valid = true;
6516                         }
6517                 }
6518         } else {
6519                 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6520                         sdev_id(sdev), sdev->lun);
6521         }
6522
6523         if (device) {
6524                 sdev->hostdata = device;
6525                 device->sdev = sdev;
6526                 if (device->queue_depth) {
6527                         device->advertised_queue_depth = device->queue_depth;
6528                         scsi_change_queue_depth(sdev,
6529                                 device->advertised_queue_depth);
6530                 }
6531                 if (pqi_is_logical_device(device)) {
6532                         pqi_disable_write_same(sdev);
6533                 } else {
6534                         sdev->allow_restart = 1;
6535                         if (device->device_type == SA_DEVICE_TYPE_NVME)
6536                                 pqi_disable_write_same(sdev);
6537                 }
6538         }
6539
6540         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6541
6542         return 0;
6543 }
6544
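/*
 * Use the PCI MSI-X affinity mapping unless managed interrupts are
 * disabled, in which case fall back to the default software mapping.
 */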
6545 static void pqi_map_queues(struct Scsi_Host *shost)
6546 {
6547         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6548
6549         if (!ctrl_info->disable_managed_interrupts)
6550                 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6551                               ctrl_info->pci_dev, 0);
6552         else
6553                 return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
6554 }
6555
6556 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6557 {
6558         return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6559 }
6560
6561 static int pqi_slave_configure(struct scsi_device *sdev)
6562 {
6563         int rc = 0;
6564         struct pqi_scsi_dev *device;
6565
6566         device = sdev->hostdata;
6567         device->devtype = sdev->type;
6568
6569         if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6570                 rc = -ENXIO;
6571                 device->ignore_device = false;
6572         }
6573
6574         return rc;
6575 }
6576
6577 static void pqi_slave_destroy(struct scsi_device *sdev)
6578 {
6579         struct pqi_ctrl_info *ctrl_info;
6580         struct pqi_scsi_dev *device;
6581         int mutex_acquired;
6582         unsigned long flags;
6583
6584         ctrl_info = shost_to_hba(sdev->host);
6585
6586         mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6587         if (!mutex_acquired)
6588                 return;
6589
6590         device = sdev->hostdata;
6591         if (!device) {
6592                 mutex_unlock(&ctrl_info->scan_mutex);
6593                 return;
6594         }
6595
6596         device->lun_count--;
6597         if (device->lun_count > 0) {
6598                 mutex_unlock(&ctrl_info->scan_mutex);
6599                 return;
6600         }
6601
6602         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6603         list_del(&device->scsi_device_list_entry);
6604         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6605
6606         mutex_unlock(&ctrl_info->scan_mutex);
6607
6608         pqi_dev_info(ctrl_info, "removed", device);
6609         pqi_free_device(device);
6610 }
6611
6612 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6613 {
6614         struct pci_dev *pci_dev;
6615         u32 subsystem_vendor;
6616         u32 subsystem_device;
6617         cciss_pci_info_struct pci_info;
6618
6619         if (!arg)
6620                 return -EINVAL;
6621
6622         pci_dev = ctrl_info->pci_dev;
6623
6624         pci_info.domain = pci_domain_nr(pci_dev->bus);
6625         pci_info.bus = pci_dev->bus->number;
6626         pci_info.dev_fn = pci_dev->devfn;
6627         subsystem_vendor = pci_dev->subsystem_vendor;
6628         subsystem_device = pci_dev->subsystem_device;
6629         pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6630
6631         if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6632                 return -EFAULT;
6633
6634         return 0;
6635 }
6636
6637 static int pqi_getdrivver_ioctl(void __user *arg)
6638 {
6639         u32 version;
6640
6641         if (!arg)
6642                 return -EINVAL;
6643
6644         version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6645                 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6646
6647         if (copy_to_user(arg, &version, sizeof(version)))
6648                 return -EFAULT;
6649
6650         return 0;
6651 }
6652
6653 struct ciss_error_info {
6654         u8      scsi_status;
6655         int     command_status;
6656         size_t  sense_data_length;
6657 };
6658
6659 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6660         struct ciss_error_info *ciss_error_info)
6661 {
6662         int ciss_cmd_status;
6663         size_t sense_data_length;
6664
6665         switch (pqi_error_info->data_out_result) {
6666         case PQI_DATA_IN_OUT_GOOD:
6667                 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6668                 break;
6669         case PQI_DATA_IN_OUT_UNDERFLOW:
6670                 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6671                 break;
6672         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6673                 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6674                 break;
6675         case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6676         case PQI_DATA_IN_OUT_BUFFER_ERROR:
6677         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6678         case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6679         case PQI_DATA_IN_OUT_ERROR:
6680                 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6681                 break;
6682         case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6683         case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6684         case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6685         case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6686         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6687         case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6688         case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6689         case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6690         case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6691         case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6692                 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6693                 break;
6694         case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6695                 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6696                 break;
6697         case PQI_DATA_IN_OUT_ABORTED:
6698                 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6699                 break;
6700         case PQI_DATA_IN_OUT_TIMEOUT:
6701                 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6702                 break;
6703         default:
6704                 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6705                 break;
6706         }
6707
6708         sense_data_length =
6709                 get_unaligned_le16(&pqi_error_info->sense_data_length);
6710         if (sense_data_length == 0)
6711                 sense_data_length =
6712                         get_unaligned_le16(&pqi_error_info->response_data_length);
6713         if (sense_data_length)
6714                 if (sense_data_length > sizeof(pqi_error_info->data))
6715                         sense_data_length = sizeof(pqi_error_info->data);
6716
6717         ciss_error_info->scsi_status = pqi_error_info->status;
6718         ciss_error_info->command_status = ciss_cmd_status;
6719         ciss_error_info->sense_data_length = sense_data_length;
6720 }
6721
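/*
 * CCISS_PASSTHRU: copy in the caller's CDB and buffer, issue it as a
 * synchronous RAID-path request, then copy the completion status, sense
 * data, and any read data back to user space.
 */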
6722 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6723 {
6724         int rc;
6725         char *kernel_buffer = NULL;
6726         u16 iu_length;
6727         size_t sense_data_length;
6728         IOCTL_Command_struct iocommand;
6729         struct pqi_raid_path_request request;
6730         struct pqi_raid_error_info pqi_error_info;
6731         struct ciss_error_info ciss_error_info;
6732
6733         if (pqi_ctrl_offline(ctrl_info))
6734                 return -ENXIO;
6735         if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6736                 return -EBUSY;
6737         if (!arg)
6738                 return -EINVAL;
6739         if (!capable(CAP_SYS_RAWIO))
6740                 return -EPERM;
6741         if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6742                 return -EFAULT;
6743         if (iocommand.buf_size < 1 &&
6744                 iocommand.Request.Type.Direction != XFER_NONE)
6745                 return -EINVAL;
6746         if (iocommand.Request.CDBLen > sizeof(request.cdb))
6747                 return -EINVAL;
6748         if (iocommand.Request.Type.Type != TYPE_CMD)
6749                 return -EINVAL;
6750
6751         switch (iocommand.Request.Type.Direction) {
6752         case XFER_NONE:
6753         case XFER_WRITE:
6754         case XFER_READ:
6755         case XFER_READ | XFER_WRITE:
6756                 break;
6757         default:
6758                 return -EINVAL;
6759         }
6760
6761         if (iocommand.buf_size > 0) {
6762                 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6763                 if (!kernel_buffer)
6764                         return -ENOMEM;
6765                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6766                         if (copy_from_user(kernel_buffer, iocommand.buf,
6767                                 iocommand.buf_size)) {
6768                                 rc = -EFAULT;
6769                                 goto out;
6770                         }
6771                 } else {
6772                         memset(kernel_buffer, 0, iocommand.buf_size);
6773                 }
6774         }
6775
6776         memset(&request, 0, sizeof(request));
6777
6778         request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6779         iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6780                 PQI_REQUEST_HEADER_LENGTH;
6781         memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6782                 sizeof(request.lun_number));
6783         memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6784         request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6785
6786         switch (iocommand.Request.Type.Direction) {
6787         case XFER_NONE:
6788                 request.data_direction = SOP_NO_DIRECTION_FLAG;
6789                 break;
6790         case XFER_WRITE:
6791                 request.data_direction = SOP_WRITE_FLAG;
6792                 break;
6793         case XFER_READ:
6794                 request.data_direction = SOP_READ_FLAG;
6795                 break;
6796         case XFER_READ | XFER_WRITE:
6797                 request.data_direction = SOP_BIDIRECTIONAL;
6798                 break;
6799         }
6800
6801         request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6802
6803         if (iocommand.buf_size > 0) {
6804                 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6805
6806                 rc = pqi_map_single(ctrl_info->pci_dev,
6807                         &request.sg_descriptors[0], kernel_buffer,
6808                         iocommand.buf_size, DMA_BIDIRECTIONAL);
6809                 if (rc)
6810                         goto out;
6811
6812                 iu_length += sizeof(request.sg_descriptors[0]);
6813         }
6814
6815         put_unaligned_le16(iu_length, &request.header.iu_length);
6816
6817         if (ctrl_info->raid_iu_timeout_supported)
6818                 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6819
6820         rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6821                 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6822
6823         if (iocommand.buf_size > 0)
6824                 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6825                         DMA_BIDIRECTIONAL);
6826
6827         memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6828
6829         if (rc == 0) {
6830                 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6831                 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6832                 iocommand.error_info.CommandStatus =
6833                         ciss_error_info.command_status;
6834                 sense_data_length = ciss_error_info.sense_data_length;
6835                 if (sense_data_length) {
6836                         if (sense_data_length >
6837                                 sizeof(iocommand.error_info.SenseInfo))
6838                                 sense_data_length =
6839                                         sizeof(iocommand.error_info.SenseInfo);
6840                         memcpy(iocommand.error_info.SenseInfo,
6841                                 pqi_error_info.data, sense_data_length);
6842                         iocommand.error_info.SenseLen = sense_data_length;
6843                 }
6844         }
6845
6846         if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6847                 rc = -EFAULT;
6848                 goto out;
6849         }
6850
6851         if (rc == 0 && iocommand.buf_size > 0 &&
6852                 (iocommand.Request.Type.Direction & XFER_READ)) {
6853                 if (copy_to_user(iocommand.buf, kernel_buffer,
6854                         iocommand.buf_size)) {
6855                         rc = -EFAULT;
6856                 }
6857         }
6858
6859 out:
6860         kfree(kernel_buffer);
6861
6862         return rc;
6863 }
6864
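/*
 * Handle the legacy CCISS ioctls (see linux/cciss_ioctl.h) on behalf of the
 * SCSI midlayer: the three "new disk" variants simply trigger a rescan; the
 * rest report PCI info, report the driver version, or service a passthrough
 * request.
 */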
6865 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6866                      void __user *arg)
6867 {
6868         int rc;
6869         struct pqi_ctrl_info *ctrl_info;
6870
6871         ctrl_info = shost_to_hba(sdev->host);
6872
6873         switch (cmd) {
6874         case CCISS_DEREGDISK:
6875         case CCISS_REGNEWDISK:
6876         case CCISS_REGNEWD:
6877                 rc = pqi_scan_scsi_devices(ctrl_info);
6878                 break;
6879         case CCISS_GETPCIINFO:
6880                 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6881                 break;
6882         case CCISS_GETDRIVVER:
6883                 rc = pqi_getdrivver_ioctl(arg);
6884                 break;
6885         case CCISS_PASSTHRU:
6886                 rc = pqi_passthru_ioctl(ctrl_info, arg);
6887                 break;
6888         default:
6889                 rc = -EINVAL;
6890                 break;
6891         }
6892
6893         return rc;
6894 }
6895
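/*
 * Host-level sysfs show handlers for the controller identity attributes
 * (firmware_version, serial_number, model, vendor). These typically appear
 * under /sys/class/scsi_host/host<N>/, e.g. (hypothetical host number):
 *
 *     cat /sys/class/scsi_host/host0/firmware_version
 */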
6896 static ssize_t pqi_firmware_version_show(struct device *dev,
6897         struct device_attribute *attr, char *buffer)
6898 {
6899         struct Scsi_Host *shost;
6900         struct pqi_ctrl_info *ctrl_info;
6901
6902         shost = class_to_shost(dev);
6903         ctrl_info = shost_to_hba(shost);
6904
6905         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6906 }
6907
6908 static ssize_t pqi_serial_number_show(struct device *dev,
6909         struct device_attribute *attr, char *buffer)
6910 {
6911         struct Scsi_Host *shost;
6912         struct pqi_ctrl_info *ctrl_info;
6913
6914         shost = class_to_shost(dev);
6915         ctrl_info = shost_to_hba(shost);
6916
6917         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6918 }
6919
6920 static ssize_t pqi_model_show(struct device *dev,
6921         struct device_attribute *attr, char *buffer)
6922 {
6923         struct Scsi_Host *shost;
6924         struct pqi_ctrl_info *ctrl_info;
6925
6926         shost = class_to_shost(dev);
6927         ctrl_info = shost_to_hba(shost);
6928
6929         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6930 }
6931
6932 static ssize_t pqi_vendor_show(struct device *dev,
6933         struct device_attribute *attr, char *buffer)
6934 {
6935         struct Scsi_Host *shost;
6936         struct pqi_ctrl_info *ctrl_info;
6937
6938         shost = class_to_shost(dev);
6939         ctrl_info = shost_to_hba(shost);
6940
6941         return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6942 }
6943
6944 static ssize_t pqi_host_rescan_store(struct device *dev,
6945         struct device_attribute *attr, const char *buffer, size_t count)
6946 {
6947         struct Scsi_Host *shost = class_to_shost(dev);
6948
6949         pqi_scan_start(shost);
6950
6951         return count;
6952 }
6953
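/*
 * The lockup_action attribute selects how the driver reacts to a controller
 * lockup: the show handler lists every known action with the current one in
 * brackets, and the store handler matches the whitespace-stripped input
 * against the action names, e.g. (assuming the usual action names and a
 * hypothetical host number):
 *
 *     echo reboot > /sys/class/scsi_host/host0/lockup_action
 */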
6954 static ssize_t pqi_lockup_action_show(struct device *dev,
6955         struct device_attribute *attr, char *buffer)
6956 {
6957         int count = 0;
6958         unsigned int i;
6959
6960         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6961                 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6962                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6963                                 "[%s] ", pqi_lockup_actions[i].name);
6964                 else
6965                         count += scnprintf(buffer + count, PAGE_SIZE - count,
6966                                 "%s ", pqi_lockup_actions[i].name);
6967         }
6968
6969         count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6970
6971         return count;
6972 }
6973
6974 static ssize_t pqi_lockup_action_store(struct device *dev,
6975         struct device_attribute *attr, const char *buffer, size_t count)
6976 {
6977         unsigned int i;
6978         char *action_name;
6979         char action_name_buffer[32];
6980
6981         strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6982         action_name = strstrip(action_name_buffer);
6983
6984         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6985                 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6986                         pqi_lockup_action = pqi_lockup_actions[i].action;
6987                         return count;
6988                 }
6989         }
6990
6991         return -EINVAL;
6992 }
6993
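/*
 * The enable_stream_detection, enable_r5_writes, and enable_r6_writes
 * attributes below share one pattern: the store handlers parse the input
 * with kstrtou8() and normalize any non-zero value to 1.
 */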
6994 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6995         struct device_attribute *attr, char *buffer)
6996 {
6997         struct Scsi_Host *shost = class_to_shost(dev);
6998         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6999
7000         return scnprintf(buffer, 10, "%x\n",
7001                         ctrl_info->enable_stream_detection);
7002 }
7003
7004 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
7005         struct device_attribute *attr, const char *buffer, size_t count)
7006 {
7007         struct Scsi_Host *shost = class_to_shost(dev);
7008         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7009         u8 set_stream_detection = 0;
7010
7011         if (kstrtou8(buffer, 0, &set_stream_detection))
7012                 return -EINVAL;
7013
7014         if (set_stream_detection > 0)
7015                 set_stream_detection = 1;
7016
7017         ctrl_info->enable_stream_detection = set_stream_detection;
7018
7019         return count;
7020 }
7021
7022 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
7023         struct device_attribute *attr, char *buffer)
7024 {
7025         struct Scsi_Host *shost = class_to_shost(dev);
7026         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7027
7028         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
7029 }
7030
7031 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
7032         struct device_attribute *attr, const char *buffer, size_t count)
7033 {
7034         struct Scsi_Host *shost = class_to_shost(dev);
7035         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7036         u8 set_r5_writes = 0;
7037
7038         if (kstrtou8(buffer, 0, &set_r5_writes))
7039                 return -EINVAL;
7040
7041         if (set_r5_writes > 0)
7042                 set_r5_writes = 1;
7043
7044         ctrl_info->enable_r5_writes = set_r5_writes;
7045
7046         return count;
7047 }
7048
7049 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7050         struct device_attribute *attr, char *buffer)
7051 {
7052         struct Scsi_Host *shost = class_to_shost(dev);
7053         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7054
7055         return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7056 }
7057
7058 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7059         struct device_attribute *attr, const char *buffer, size_t count)
7060 {
7061         struct Scsi_Host *shost = class_to_shost(dev);
7062         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7063         u8 set_r6_writes = 0;
7064
7065         if (kstrtou8(buffer, 0, &set_r6_writes))
7066                 return -EINVAL;
7067
7068         if (set_r6_writes > 0)
7069                 set_r6_writes = 1;
7070
7071         ctrl_info->enable_r6_writes = set_r6_writes;
7072
7073         return count;
7074 }
7075
7076 static DEVICE_STRING_ATTR_RO(driver_version, 0444,
7077         DRIVER_VERSION BUILD_TIMESTAMP);
7078 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7079 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7080 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7081 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7082 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7083 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7084         pqi_lockup_action_store);
7085 static DEVICE_ATTR(enable_stream_detection, 0644,
7086         pqi_host_enable_stream_detection_show,
7087         pqi_host_enable_stream_detection_store);
7088 static DEVICE_ATTR(enable_r5_writes, 0644,
7089         pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7090 static DEVICE_ATTR(enable_r6_writes, 0644,
7091         pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
7092
7093 static struct attribute *pqi_shost_attrs[] = {
7094         &dev_attr_driver_version.attr.attr,
7095         &dev_attr_firmware_version.attr,
7096         &dev_attr_model.attr,
7097         &dev_attr_serial_number.attr,
7098         &dev_attr_vendor.attr,
7099         &dev_attr_rescan.attr,
7100         &dev_attr_lockup_action.attr,
7101         &dev_attr_enable_stream_detection.attr,
7102         &dev_attr_enable_r5_writes.attr,
7103         &dev_attr_enable_r6_writes.attr,
7104         NULL
7105 };
7106
7107 ATTRIBUTE_GROUPS(pqi_shost);
7108
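/*
 * Per-device (sdev) sysfs show handlers. They share a common pattern: bail
 * out if the controller is offline, then look up sdev->hostdata under
 * scsi_device_list_lock (the device may be torn down concurrently), and
 * copy out the fields of interest before dropping the lock.
 */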
7109 static ssize_t pqi_unique_id_show(struct device *dev,
7110         struct device_attribute *attr, char *buffer)
7111 {
7112         struct pqi_ctrl_info *ctrl_info;
7113         struct scsi_device *sdev;
7114         struct pqi_scsi_dev *device;
7115         unsigned long flags;
7116         u8 unique_id[16];
7117
7118         sdev = to_scsi_device(dev);
7119         ctrl_info = shost_to_hba(sdev->host);
7120
7121         if (pqi_ctrl_offline(ctrl_info))
7122                 return -ENODEV;
7123
7124         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7125
7126         device = sdev->hostdata;
7127         if (!device) {
7128                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7129                 return -ENODEV;
7130         }
7131
7132         if (device->is_physical_device)
7133                 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7134         else
7135                 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7136
7137         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7138
7139         return scnprintf(buffer, PAGE_SIZE,
7140                 "%02X%02X%02X%02X%02X%02X%02X%02X"
7141                 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7142                 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7143                 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7144                 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7145                 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7146 }
7147
7148 static ssize_t pqi_lunid_show(struct device *dev,
7149         struct device_attribute *attr, char *buffer)
7150 {
7151         struct pqi_ctrl_info *ctrl_info;
7152         struct scsi_device *sdev;
7153         struct pqi_scsi_dev *device;
7154         unsigned long flags;
7155         u8 lunid[8];
7156
7157         sdev = to_scsi_device(dev);
7158         ctrl_info = shost_to_hba(sdev->host);
7159
7160         if (pqi_ctrl_offline(ctrl_info))
7161                 return -ENODEV;
7162
7163         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7164
7165         device = sdev->hostdata;
7166         if (!device) {
7167                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7168                 return -ENODEV;
7169         }
7170
7171         memcpy(lunid, device->scsi3addr, sizeof(lunid));
7172
7173         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7174
7175         return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7176 }
7177
7178 #define MAX_PATHS       8
7179
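/*
 * Report one line per active or inactive path to the device: the SCSI
 * nexus and device type, plus, for physical disks, the connector, box,
 * and bay identifiers.
 */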
7180 static ssize_t pqi_path_info_show(struct device *dev,
7181         struct device_attribute *attr, char *buf)
7182 {
7183         struct pqi_ctrl_info *ctrl_info;
7184         struct scsi_device *sdev;
7185         struct pqi_scsi_dev *device;
7186         unsigned long flags;
7187         int i;
7188         int output_len = 0;
7189         u8 box;
7190         u8 bay;
7191         u8 path_map_index;
7192         char *active;
7193         u8 phys_connector[2];
7194
7195         sdev = to_scsi_device(dev);
7196         ctrl_info = shost_to_hba(sdev->host);
7197
7198         if (pqi_ctrl_offline(ctrl_info))
7199                 return -ENODEV;
7200
7201         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7202
7203         device = sdev->hostdata;
7204         if (!device) {
7205                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7206                 return -ENODEV;
7207         }
7208
7209         bay = device->bay;
7210         for (i = 0; i < MAX_PATHS; i++) {
7211                 path_map_index = 1 << i;
7212                 if (i == device->active_path_index)
7213                         active = "Active";
7214                 else if (device->path_map & path_map_index)
7215                         active = "Inactive";
7216                 else
7217                         continue;
7218
7219                 output_len += scnprintf(buf + output_len,
7220                                         PAGE_SIZE - output_len,
7221                                         "[%d:%d:%d:%d] %20.20s ",
7222                                         ctrl_info->scsi_host->host_no,
7223                                         device->bus, device->target,
7224                                         device->lun,
7225                                         scsi_device_type(device->devtype));
7226
7227                 if (device->devtype == TYPE_RAID ||
7228                         pqi_is_logical_device(device))
7229                         goto end_buffer;
7230
7231                 memcpy(&phys_connector, &device->phys_connector[i],
7232                         sizeof(phys_connector));
7233                 if (phys_connector[0] < '0')
7234                         phys_connector[0] = '0';
7235                 if (phys_connector[1] < '0')
7236                         phys_connector[1] = '0';
7237
7238                 output_len += scnprintf(buf + output_len,
7239                                         PAGE_SIZE - output_len,
7240                                         "PORT: %.2s ", phys_connector);
7241
7242                 box = device->box[i];
7243                 if (box != 0 && box != 0xFF)
7244                         output_len += scnprintf(buf + output_len,
7245                                                 PAGE_SIZE - output_len,
7246                                                 "BOX: %hhu ", box);
7247
7248                 if ((device->devtype == TYPE_DISK ||
7249                         device->devtype == TYPE_ZBC) &&
7250                         pqi_expose_device(device))
7251                         output_len += scnprintf(buf + output_len,
7252                                                 PAGE_SIZE - output_len,
7253                                                 "BAY: %hhu ", bay);
7254
7255 end_buffer:
7256                 output_len += scnprintf(buf + output_len,
7257                                         PAGE_SIZE - output_len,
7258                                         "%s\n", active);
7259         }
7260
7261         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7262
7263         return output_len;
7264 }
7265
7266 static ssize_t pqi_sas_address_show(struct device *dev,
7267         struct device_attribute *attr, char *buffer)
7268 {
7269         struct pqi_ctrl_info *ctrl_info;
7270         struct scsi_device *sdev;
7271         struct pqi_scsi_dev *device;
7272         unsigned long flags;
7273         u64 sas_address;
7274
7275         sdev = to_scsi_device(dev);
7276         ctrl_info = shost_to_hba(sdev->host);
7277
7278         if (pqi_ctrl_offline(ctrl_info))
7279                 return -ENODEV;
7280
7281         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7282
7283         device = sdev->hostdata;
7284         if (!device) {
7285                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7286                 return -ENODEV;
7287         }
7288
7289         sas_address = device->sas_address;
7290
7291         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7292
7293         return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7294 }
7295
7296 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7297         struct device_attribute *attr, char *buffer)
7298 {
7299         struct pqi_ctrl_info *ctrl_info;
7300         struct scsi_device *sdev;
7301         struct pqi_scsi_dev *device;
7302         unsigned long flags;
7303
7304         sdev = to_scsi_device(dev);
7305         ctrl_info = shost_to_hba(sdev->host);
7306
7307         if (pqi_ctrl_offline(ctrl_info))
7308                 return -ENODEV;
7309
7310         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7311
7312         device = sdev->hostdata;
7313         if (!device) {
7314                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7315                 return -ENODEV;
7316         }
7317
7318         buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7319         buffer[1] = '\n';
7320         buffer[2] = '\0';
7321
7322         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7323
7324         return 2;
7325 }
7326
7327 static ssize_t pqi_raid_level_show(struct device *dev,
7328         struct device_attribute *attr, char *buffer)
7329 {
7330         struct pqi_ctrl_info *ctrl_info;
7331         struct scsi_device *sdev;
7332         struct pqi_scsi_dev *device;
7333         unsigned long flags;
7334         char *raid_level;
7335
7336         sdev = to_scsi_device(dev);
7337         ctrl_info = shost_to_hba(sdev->host);
7338
7339         if (pqi_ctrl_offline(ctrl_info))
7340                 return -ENODEV;
7341
7342         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7343
7344         device = sdev->hostdata;
7345         if (!device) {
7346                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7347                 return -ENODEV;
7348         }
7349
7350         if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7351                 raid_level = pqi_raid_level_to_string(device->raid_level);
7352         else
7353                 raid_level = "N/A";
7354
7355         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7356
7357         return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7358 }
7359
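/*
 * The RAID bypass count is maintained as per-CPU counters; the show handler
 * sums them across the online CPUs. (write_stream_cnt further below is
 * maintained the same way.)
 */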
7360 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7361         struct device_attribute *attr, char *buffer)
7362 {
7363         struct pqi_ctrl_info *ctrl_info;
7364         struct scsi_device *sdev;
7365         struct pqi_scsi_dev *device;
7366         unsigned long flags;
7367         u64 raid_bypass_cnt;
7368         int cpu;
7369
7370         sdev = to_scsi_device(dev);
7371         ctrl_info = shost_to_hba(sdev->host);
7372
7373         if (pqi_ctrl_offline(ctrl_info))
7374                 return -ENODEV;
7375
7376         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7377
7378         device = sdev->hostdata;
7379         if (!device) {
7380                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7381                 return -ENODEV;
7382         }
7383
7384         raid_bypass_cnt = 0;
7385
7386         if (device->raid_io_stats) {
7387                 for_each_online_cpu(cpu) {
7388                         raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
7389                 }
7390         }
7391
7392         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7393
7394         return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
7395 }
7396
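/*
 * The sas_ncq_prio_enable attribute controls NCQ priority for SATA devices
 * behind the controller; the store handler rejects devices that do not
 * report NCQ priority support.
 */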
7397 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7398                 struct device_attribute *attr, char *buf)
7399 {
7400         struct pqi_ctrl_info *ctrl_info;
7401         struct scsi_device *sdev;
7402         struct pqi_scsi_dev *device;
7403         unsigned long flags;
7404         int output_len = 0;
7405
7406         sdev = to_scsi_device(dev);
7407         ctrl_info = shost_to_hba(sdev->host);
7408
7409         if (pqi_ctrl_offline(ctrl_info))
7410                 return -ENODEV;
7411
7412         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7413
7414         device = sdev->hostdata;
7415         if (!device) {
7416                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7417                 return -ENODEV;
7418         }
7419
7420         output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7421                                 device->ncq_prio_enable);
7422         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7423
7424         return output_len;
7425 }
7426
7427 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7428                         struct device_attribute *attr,
7429                         const char *buf, size_t count)
7430 {
7431         struct pqi_ctrl_info *ctrl_info;
7432         struct scsi_device *sdev;
7433         struct pqi_scsi_dev *device;
7434         unsigned long flags;
7435         u8 ncq_prio_enable = 0;
7436
7437         if (kstrtou8(buf, 0, &ncq_prio_enable))
7438                 return -EINVAL;
7439
7440         sdev = to_scsi_device(dev);
7441         ctrl_info = shost_to_hba(sdev->host);
7442
7443         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7444
7445         device = sdev->hostdata;
7446
7447         if (!device) {
7448                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7449                 return -ENODEV;
7450         }
7451
7452         if (!device->ncq_prio_support) {
7453                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7454                 return -EINVAL;
7455         }
7456
7457         device->ncq_prio_enable = ncq_prio_enable;
7458
7459         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7460
7461         return strlen(buf);
7462 }
7463
7464 static ssize_t pqi_numa_node_show(struct device *dev,
7465         struct device_attribute *attr, char *buffer)
7466 {
7467         struct scsi_device *sdev;
7468         struct pqi_ctrl_info *ctrl_info;
7469
7470         sdev = to_scsi_device(dev);
7471         ctrl_info = shost_to_hba(sdev->host);
7472
7473         return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7474 }
7475
7476 static ssize_t pqi_write_stream_cnt_show(struct device *dev,
7477         struct device_attribute *attr, char *buffer)
7478 {
7479         struct pqi_ctrl_info *ctrl_info;
7480         struct scsi_device *sdev;
7481         struct pqi_scsi_dev *device;
7482         unsigned long flags;
7483         u64 write_stream_cnt;
7484         int cpu;
7485
7486         sdev = to_scsi_device(dev);
7487         ctrl_info = shost_to_hba(sdev->host);
7488
7489         if (pqi_ctrl_offline(ctrl_info))
7490                 return -ENODEV;
7491
7492         spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7493
7494         device = sdev->hostdata;
7495         if (!device) {
7496                 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7497                 return -ENODEV;
7498         }
7499
7500         write_stream_cnt = 0;
7501
7502         if (device->raid_io_stats) {
7503                 for_each_online_cpu(cpu) {
7504                         write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
7505                 }
7506         }
7507
7508         spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7509
7510         return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
7511 }
7512
7513 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7514 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7515 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7516 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7517 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7518 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7519 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7520 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7521                 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7522 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7523 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
7524
7525 static struct attribute *pqi_sdev_attrs[] = {
7526         &dev_attr_lunid.attr,
7527         &dev_attr_unique_id.attr,
7528         &dev_attr_path_info.attr,
7529         &dev_attr_sas_address.attr,
7530         &dev_attr_ssd_smart_path_enabled.attr,
7531         &dev_attr_raid_level.attr,
7532         &dev_attr_raid_bypass_cnt.attr,
7533         &dev_attr_sas_ncq_prio_enable.attr,
7534         &dev_attr_numa_node.attr,
7535         &dev_attr_write_stream_cnt.attr,
7536         NULL
7537 };
7538
7539 ATTRIBUTE_GROUPS(pqi_sdev);
7540
7541 static const struct scsi_host_template pqi_driver_template = {
7542         .module = THIS_MODULE,
7543         .name = DRIVER_NAME_SHORT,
7544         .proc_name = DRIVER_NAME_SHORT,
7545         .queuecommand = pqi_scsi_queue_command,
7546         .scan_start = pqi_scan_start,
7547         .scan_finished = pqi_scan_finished,
7548         .this_id = -1,
7549         .eh_device_reset_handler = pqi_eh_device_reset_handler,
7550         .eh_abort_handler = pqi_eh_abort_handler,
7551         .ioctl = pqi_ioctl,
7552         .slave_alloc = pqi_slave_alloc,
7553         .slave_configure = pqi_slave_configure,
7554         .slave_destroy = pqi_slave_destroy,
7555         .map_queues = pqi_map_queues,
7556         .sdev_groups = pqi_sdev_groups,
7557         .shost_groups = pqi_shost_groups,
7558         .cmd_size = sizeof(struct pqi_cmd_priv),
7559 };
7560
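/*
 * Allocate and register the SCSI host. Only the ctrl_info pointer is kept
 * in the host private data (hence the sizeof(ctrl_info) allocation), and
 * host_tagset = 1 makes all hardware queues share a single tag space.
 */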
7561 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7562 {
7563         int rc;
7564         struct Scsi_Host *shost;
7565
7566         shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7567         if (!shost) {
7568                 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7569                 return -ENOMEM;
7570         }
7571
7572         shost->io_port = 0;
7573         shost->n_io_port = 0;
7574         shost->this_id = -1;
7575         shost->max_channel = PQI_MAX_BUS;
7576         shost->max_cmd_len = MAX_COMMAND_SIZE;
7577         shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7578         shost->max_id = ~0;
7579         shost->max_sectors = ctrl_info->max_sectors;
7580         shost->can_queue = ctrl_info->scsi_ml_can_queue;
7581         shost->cmd_per_lun = shost->can_queue;
7582         shost->sg_tablesize = ctrl_info->sg_tablesize;
7583         shost->transportt = pqi_sas_transport_template;
7584         shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7585         shost->unique_id = shost->irq;
7586         shost->nr_hw_queues = ctrl_info->num_queue_groups;
7587         shost->host_tagset = 1;
7588         shost->hostdata[0] = (unsigned long)ctrl_info;
7589
7590         rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7591         if (rc) {
7592                 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7593                 goto free_host;
7594         }
7595
7596         rc = pqi_add_sas_host(shost, ctrl_info);
7597         if (rc) {
7598                 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7599                 goto remove_host;
7600         }
7601
7602         ctrl_info->scsi_host = shost;
7603
7604         return 0;
7605
7606 remove_host:
7607         scsi_remove_host(shost);
7608 free_host:
7609         scsi_host_put(shost);
7610
7611         return rc;
7612 }
7613
7614 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7615 {
7616         struct Scsi_Host *shost;
7617
7618         pqi_delete_sas_host(ctrl_info);
7619
7620         shost = ctrl_info->scsi_host;
7621         if (!shost)
7622                 return;
7623
7624         scsi_remove_host(shost);
7625         scsi_host_put(shost);
7626 }
7627
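/*
 * Poll the PQI device reset register until the reset action completes,
 * giving up early if the firmware dies or the controller-advertised
 * timeout expires (max_reset_timeout is scaled by 100 here, i.e. the
 * register presumably counts in units of 100 milliseconds).
 */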
7628 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7629 {
7630         int rc = 0;
7631         struct pqi_device_registers __iomem *pqi_registers;
7632         unsigned long timeout;
7633         unsigned int timeout_msecs;
7634         union pqi_reset_register reset_reg;
7635
7636         pqi_registers = ctrl_info->pqi_registers;
7637         timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7638         timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7639
7640         while (1) {
7641                 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7642                 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7643                 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7644                         break;
7645                 if (!sis_is_firmware_running(ctrl_info)) {
7646                         rc = -ENXIO;
7647                         break;
7648                 }
7649                 if (time_after(jiffies, timeout)) {
7650                         rc = -ETIMEDOUT;
7651                         break;
7652                 }
7653         }
7654
7655         return rc;
7656 }
7657
7658 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7659 {
7660         int rc;
7661         union pqi_reset_register reset_reg;
7662
7663         if (ctrl_info->pqi_reset_quiesce_supported) {
7664                 rc = sis_pqi_reset_quiesce(ctrl_info);
7665                 if (rc) {
7666                         dev_err(&ctrl_info->pci_dev->dev,
7667                                 "PQI reset failed during quiesce with error %d\n", rc);
7668                         return rc;
7669                 }
7670         }
7671
7672         reset_reg.all_bits = 0;
7673         reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7674         reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7675
7676         writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7677
7678         rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7679         if (rc)
7680                 dev_err(&ctrl_info->pci_dev->dev,
7681                         "PQI reset failed with error %d\n", rc);
7682
7683         return rc;
7684 }
7685
7686 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7687 {
7688         int rc;
7689         struct bmic_sense_subsystem_info *sense_info;
7690
7691         sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7692         if (!sense_info)
7693                 return -ENOMEM;
7694
7695         rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7696         if (rc)
7697                 goto out;
7698
7699         memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7700                 sizeof(sense_info->ctrl_serial_number));
7701         ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7702
7703 out:
7704         kfree(sense_info);
7705
7706         return rc;
7707 }
7708
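/*
 * Cache the controller's firmware version, model, and vendor strings.
 * Newer firmware reports a long, self-contained version string; otherwise
 * the short version string is combined with the build number
 * ("<version>-<build>").
 */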
7709 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7710 {
7711         int rc;
7712         struct bmic_identify_controller *identify;
7713
7714         identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7715         if (!identify)
7716                 return -ENOMEM;
7717
7718         rc = pqi_identify_controller(ctrl_info, identify);
7719         if (rc)
7720                 goto out;
7721
7722         if (get_unaligned_le32(&identify->extra_controller_flags) &
7723                 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7724                 memcpy(ctrl_info->firmware_version,
7725                         identify->firmware_version_long,
7726                         sizeof(identify->firmware_version_long));
7727         } else {
7728                 memcpy(ctrl_info->firmware_version,
7729                         identify->firmware_version_short,
7730                         sizeof(identify->firmware_version_short));
7731                 ctrl_info->firmware_version
7732                         [sizeof(identify->firmware_version_short)] = '\0';
7733                 snprintf(ctrl_info->firmware_version +
7734                         strlen(ctrl_info->firmware_version),
7735                         sizeof(ctrl_info->firmware_version) -
7736                         sizeof(identify->firmware_version_short),
7737                         "-%u",
7738                         get_unaligned_le16(&identify->firmware_build_number));
7739         }
7740
7741         memcpy(ctrl_info->model, identify->product_id,
7742                 sizeof(identify->product_id));
7743         ctrl_info->model[sizeof(identify->product_id)] = '\0';
7744
7745         memcpy(ctrl_info->vendor, identify->vendor_id,
7746                 sizeof(identify->vendor_id));
7747         ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7748
7749         dev_info(&ctrl_info->pci_dev->dev,
7750                 "Firmware version: %s\n", ctrl_info->firmware_version);
7751
7752 out:
7753         kfree(identify);
7754
7755         return rc;
7756 }
7757
7758 struct pqi_config_table_section_info {
7759         struct pqi_ctrl_info *ctrl_info;
7760         void            *section;
7761         u32             section_offset;
7762         void __iomem    *section_iomem_addr;
7763 };
7764
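/*
 * Layout of the firmware features section body, as implied by the index
 * arithmetic in the helpers below (n == le16_to_cpu(num_elements)):
 *
 *     features_supported[0 .. n-1]    - written by firmware
 *     features_requested[n .. 2n-1]   - written by the host
 *     features_enabled[2n .. 3n-1]    - written by firmware after the
 *                                       config table update
 *
 * Each feature occupies one bit, indexed by its feature bit position.
 */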
7765 static inline bool pqi_is_firmware_feature_supported(
7766         struct pqi_config_table_firmware_features *firmware_features,
7767         unsigned int bit_position)
7768 {
7769         unsigned int byte_index;
7770
7771         byte_index = bit_position / BITS_PER_BYTE;
7772
7773         if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7774                 return false;
7775
7776         return firmware_features->features_supported[byte_index] &
7777                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7778 }
7779
7780 static inline bool pqi_is_firmware_feature_enabled(
7781         struct pqi_config_table_firmware_features *firmware_features,
7782         void __iomem *firmware_features_iomem_addr,
7783         unsigned int bit_position)
7784 {
7785         unsigned int byte_index;
7786         u8 __iomem *features_enabled_iomem_addr;
7787
7788         byte_index = (bit_position / BITS_PER_BYTE) +
7789                 (le16_to_cpu(firmware_features->num_elements) * 2);
7790
7791         features_enabled_iomem_addr = firmware_features_iomem_addr +
7792                 offsetof(struct pqi_config_table_firmware_features,
7793                         features_supported) + byte_index;
7794
7795         return *((__force u8 *)features_enabled_iomem_addr) &
7796                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7797 }
7798
7799 static inline void pqi_request_firmware_feature(
7800         struct pqi_config_table_firmware_features *firmware_features,
7801         unsigned int bit_position)
7802 {
7803         unsigned int byte_index;
7804
7805         byte_index = (bit_position / BITS_PER_BYTE) +
7806                 le16_to_cpu(firmware_features->num_elements);
7807
7808         firmware_features->features_supported[byte_index] |=
7809                 (1 << (bit_position % BITS_PER_BYTE));
7810 }
7811
7812 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7813         u16 first_section, u16 last_section)
7814 {
7815         struct pqi_vendor_general_request request;
7816
7817         memset(&request, 0, sizeof(request));
7818
7819         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7820         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7821                 &request.header.iu_length);
7822         put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7823                 &request.function_code);
7824         put_unaligned_le16(first_section,
7825                 &request.data.config_table_update.first_section);
7826         put_unaligned_le16(last_section,
7827                 &request.data.config_table_update.last_section);
7828
7829         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7830 }
7831
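/*
 * Copy the host-requested feature bitmap into the controller's config
 * table, advertise the highest feature bit the driver knows about (when
 * the controller supports the "maximum known feature" element), and then
 * issue a config table update so the firmware acts on the request.
 */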
7832 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7833         struct pqi_config_table_firmware_features *firmware_features,
7834         void __iomem *firmware_features_iomem_addr)
7835 {
7836         void *features_requested;
7837         void __iomem *features_requested_iomem_addr;
7838         void __iomem *host_max_known_feature_iomem_addr;
7839
7840         features_requested = firmware_features->features_supported +
7841                 le16_to_cpu(firmware_features->num_elements);
7842
7843         features_requested_iomem_addr = firmware_features_iomem_addr +
7844                 (features_requested - (void *)firmware_features);
7845
7846         memcpy_toio(features_requested_iomem_addr, features_requested,
7847                 le16_to_cpu(firmware_features->num_elements));
7848
7849         if (pqi_is_firmware_feature_supported(firmware_features,
7850                 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7851                 host_max_known_feature_iomem_addr =
7852                         features_requested_iomem_addr +
7853                         (le16_to_cpu(firmware_features->num_elements) * 2) +
7854                         sizeof(__le16);
7855                 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7856                 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7857         }
7858
7859         return pqi_config_table_update(ctrl_info,
7860                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7861                 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7862 }
7863
7864 struct pqi_firmware_feature {
7865         char            *feature_name;
7866         unsigned int    feature_bit;
7867         bool            supported;
7868         bool            enabled;
7869         void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7870                 struct pqi_firmware_feature *firmware_feature);
7871 };
7872
7873 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7874         struct pqi_firmware_feature *firmware_feature)
7875 {
7876         if (!firmware_feature->supported) {
7877                 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7878                         firmware_feature->feature_name);
7879                 return;
7880         }
7881
7882         if (firmware_feature->enabled) {
7883                 dev_info(&ctrl_info->pci_dev->dev,
7884                         "%s enabled\n", firmware_feature->feature_name);
7885                 return;
7886         }
7887
7888         dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7889                 firmware_feature->feature_name);
7890 }
7891
7892 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7893         struct pqi_firmware_feature *firmware_feature)
7894 {
7895         switch (firmware_feature->feature_bit) {
7896         case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7897                 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7898                 break;
7899         case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7900                 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7901                 break;
7902         case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7903                 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7904                 break;
7905         case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7906                 ctrl_info->soft_reset_handshake_supported =
7907                         firmware_feature->enabled &&
7908                         pqi_read_soft_reset_status(ctrl_info);
7909                 break;
7910         case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7911                 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7912                 break;
7913         case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7914                 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7915                 break;
7916         case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7917                 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7918                 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7919                 break;
7920         case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7921                 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7922                 break;
7923         case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7924                 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7925                 break;
7926         case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
7927                 ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
7928                 break;
7929         }
7930
7931         pqi_firmware_feature_status(ctrl_info, firmware_feature);
7932 }
7933
7934 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7935         struct pqi_firmware_feature *firmware_feature)
7936 {
7937         if (firmware_feature->feature_status)
7938                 firmware_feature->feature_status(ctrl_info, firmware_feature);
7939 }
7940
7941 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7942
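/*
 * Table of the firmware features this driver knows about. Entries whose
 * feature_status callback is pqi_ctrl_update_feature_flags also cache the
 * enabled state in ctrl_info; the rest only log their status.
 */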
7943 static struct pqi_firmware_feature pqi_firmware_features[] = {
7944         {
7945                 .feature_name = "Online Firmware Activation",
7946                 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7947                 .feature_status = pqi_firmware_feature_status,
7948         },
7949         {
7950                 .feature_name = "Serial Management Protocol",
7951                 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7952                 .feature_status = pqi_firmware_feature_status,
7953         },
7954         {
7955                 .feature_name = "Maximum Known Feature",
7956                 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7957                 .feature_status = pqi_firmware_feature_status,
7958         },
7959         {
7960                 .feature_name = "RAID 0 Read Bypass",
7961                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7962                 .feature_status = pqi_firmware_feature_status,
7963         },
7964         {
7965                 .feature_name = "RAID 1 Read Bypass",
7966                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7967                 .feature_status = pqi_firmware_feature_status,
7968         },
7969         {
7970                 .feature_name = "RAID 5 Read Bypass",
7971                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7972                 .feature_status = pqi_firmware_feature_status,
7973         },
7974         {
7975                 .feature_name = "RAID 6 Read Bypass",
7976                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7977                 .feature_status = pqi_firmware_feature_status,
7978         },
7979         {
7980                 .feature_name = "RAID 0 Write Bypass",
7981                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7982                 .feature_status = pqi_firmware_feature_status,
7983         },
7984         {
7985                 .feature_name = "RAID 1 Write Bypass",
7986                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7987                 .feature_status = pqi_ctrl_update_feature_flags,
7988         },
7989         {
7990                 .feature_name = "RAID 5 Write Bypass",
7991                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7992                 .feature_status = pqi_ctrl_update_feature_flags,
7993         },
7994         {
7995                 .feature_name = "RAID 6 Write Bypass",
7996                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7997                 .feature_status = pqi_ctrl_update_feature_flags,
7998         },
7999         {
8000                 .feature_name = "New Soft Reset Handshake",
8001                 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
8002                 .feature_status = pqi_ctrl_update_feature_flags,
8003         },
8004         {
8005                 .feature_name = "RAID IU Timeout",
8006                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
8007                 .feature_status = pqi_ctrl_update_feature_flags,
8008         },
8009         {
8010                 .feature_name = "TMF IU Timeout",
8011                 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
8012                 .feature_status = pqi_ctrl_update_feature_flags,
8013         },
8014         {
8015                 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
8016                 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
8017                 .feature_status = pqi_firmware_feature_status,
8018         },
8019         {
8020                 .feature_name = "Firmware Triage",
8021                 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
8022                 .feature_status = pqi_ctrl_update_feature_flags,
8023         },
8024         {
8025                 .feature_name = "RPL Extended Formats 4 and 5",
8026                 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
8027                 .feature_status = pqi_ctrl_update_feature_flags,
8028         },
8029         {
8030                 .feature_name = "Multi-LUN Target",
8031                 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
8032                 .feature_status = pqi_ctrl_update_feature_flags,
8033         },
8034         {
8035                 .feature_name = "Controller Data Logging",
8036                 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
8037                 .feature_status = pqi_ctrl_update_feature_flags,
8038         },
8039 };
8040
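/*
 * Negotiate firmware features in three passes: mark (and log) which of the
 * known features the firmware supports, request every supported feature,
 * and finally read back which features the firmware actually enabled.
 */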
8041 static void pqi_process_firmware_features(
8042         struct pqi_config_table_section_info *section_info)
8043 {
8044         int rc;
8045         struct pqi_ctrl_info *ctrl_info;
8046         struct pqi_config_table_firmware_features *firmware_features;
8047         void __iomem *firmware_features_iomem_addr;
8048         unsigned int i;
8049         unsigned int num_features_supported;
8050
8051         ctrl_info = section_info->ctrl_info;
8052         firmware_features = section_info->section;
8053         firmware_features_iomem_addr = section_info->section_iomem_addr;
8054
8055         for (i = 0, num_features_supported = 0;
8056                 i < ARRAY_SIZE(pqi_firmware_features); i++) {
8057                 if (pqi_is_firmware_feature_supported(firmware_features,
8058                         pqi_firmware_features[i].feature_bit)) {
8059                         pqi_firmware_features[i].supported = true;
8060                         num_features_supported++;
8061                 } else {
8062                         pqi_firmware_feature_update(ctrl_info,
8063                                 &pqi_firmware_features[i]);
8064                 }
8065         }
8066
8067         if (num_features_supported == 0)
8068                 return;
8069
8070         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8071                 if (!pqi_firmware_features[i].supported)
8072                         continue;
8073                 pqi_request_firmware_feature(firmware_features,
8074                         pqi_firmware_features[i].feature_bit);
8075         }
8076
8077         rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
8078                 firmware_features_iomem_addr);
8079         if (rc) {
8080                 dev_err(&ctrl_info->pci_dev->dev,
8081                         "failed to enable firmware features in PQI configuration table\n");
8082                 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8083                         if (!pqi_firmware_features[i].supported)
8084                                 continue;
8085                         pqi_firmware_feature_update(ctrl_info,
8086                                 &pqi_firmware_features[i]);
8087                 }
8088                 return;
8089         }
8090
8091         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8092                 if (!pqi_firmware_features[i].supported)
8093                         continue;
8094                 if (pqi_is_firmware_feature_enabled(firmware_features,
8095                         firmware_features_iomem_addr,
8096                         pqi_firmware_features[i].feature_bit)) {
8097                         pqi_firmware_features[i].enabled = true;
8098                 }
8099                 pqi_firmware_feature_update(ctrl_info,
8100                         &pqi_firmware_features[i]);
8101         }
8102 }
8103
8104 static void pqi_init_firmware_features(void)
8105 {
8106         unsigned int i;
8107
8108         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8109                 pqi_firmware_features[i].supported = false;
8110                 pqi_firmware_features[i].enabled = false;
8111         }
8112 }
8113
8114 static void pqi_process_firmware_features_section(
8115         struct pqi_config_table_section_info *section_info)
8116 {
8117         mutex_lock(&pqi_firmware_features_mutex);
8118         pqi_init_firmware_features();
8119         pqi_process_firmware_features(section_info);
8120         mutex_unlock(&pqi_firmware_features_mutex);
8121 }
8122
8123 /*
8124  * Reset all controller settings that can be initialized during the processing
8125  * of the PQI Configuration Table.
8126  */
8127
8128 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8129 {
8130         ctrl_info->heartbeat_counter = NULL;
8131         ctrl_info->soft_reset_status = NULL;
8132         ctrl_info->soft_reset_handshake_supported = false;
8133         ctrl_info->enable_r1_writes = false;
8134         ctrl_info->enable_r5_writes = false;
8135         ctrl_info->enable_r6_writes = false;
8136         ctrl_info->raid_iu_timeout_supported = false;
8137         ctrl_info->tmf_iu_timeout_supported = false;
8138         ctrl_info->firmware_triage_supported = false;
8139         ctrl_info->rpl_extended_format_4_5_supported = false;
8140         ctrl_info->multi_lun_device_supported = false;
8141         ctrl_info->ctrl_logging_supported = false;
8142 }
8143
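/*
 * Walk the PQI configuration table: the sections form a singly linked list
 * via first_section_offset/next_section_offset. The table is parsed from a
 * RAM copy, but the heartbeat counter and soft reset status pointers must
 * reference the live iomem copy.
 */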
8144 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8145 {
8146         u32 table_length;
8147         u32 section_offset;
8148         bool firmware_feature_section_present;
8149         void __iomem *table_iomem_addr;
8150         struct pqi_config_table *config_table;
8151         struct pqi_config_table_section_header *section;
8152         struct pqi_config_table_section_info section_info;
8153         struct pqi_config_table_section_info feature_section_info = {0};
8154
8155         table_length = ctrl_info->config_table_length;
8156         if (table_length == 0)
8157                 return 0;
8158
8159         config_table = kmalloc(table_length, GFP_KERNEL);
8160         if (!config_table) {
8161                 dev_err(&ctrl_info->pci_dev->dev,
8162                         "failed to allocate memory for PQI configuration table\n");
8163                 return -ENOMEM;
8164         }
8165
8166         /*
8167          * Copy the config table contents from I/O memory space into the
8168          * temporary buffer.
8169          */
8170         table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8171         memcpy_fromio(config_table, table_iomem_addr, table_length);
8172
8173         firmware_feature_section_present = false;
8174         section_info.ctrl_info = ctrl_info;
8175         section_offset = get_unaligned_le32(&config_table->first_section_offset);
8176
8177         while (section_offset) {
8178                 section = (void *)config_table + section_offset;
8179
8180                 section_info.section = section;
8181                 section_info.section_offset = section_offset;
8182                 section_info.section_iomem_addr = table_iomem_addr + section_offset;
8183
8184                 switch (get_unaligned_le16(&section->section_id)) {
8185                 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8186                         firmware_feature_section_present = true;
8187                         feature_section_info = section_info;
8188                         break;
8189                 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8190                         if (pqi_disable_heartbeat)
8191                                 dev_warn(&ctrl_info->pci_dev->dev,
8192                                 "heartbeat disabled by module parameter\n");
8193                         else
8194                                 ctrl_info->heartbeat_counter =
8195                                         table_iomem_addr +
8196                                         section_offset +
8197                                         offsetof(struct pqi_config_table_heartbeat,
8198                                                 heartbeat_counter);
8199                         break;
8200                 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8201                         ctrl_info->soft_reset_status =
8202                                 table_iomem_addr +
8203                                 section_offset +
8204                                 offsetof(struct pqi_config_table_soft_reset,
8205                                         soft_reset_status);
8206                         break;
8207                 }
8208
8209                 section_offset = get_unaligned_le16(&section->next_section_offset);
8210         }
8211
8212         /*
8213          * Process the firmware feature section last so that the feature bit
8214          * callbacks can take into account the settings configured by the
8215          * other sections.
8216          */
8217         if (firmware_feature_section_present)
8218                 pqi_process_firmware_features_section(&feature_section_info);
8219
8220         kfree(config_table);
8221
8222         return 0;
8223 }
8224
8225 /* Switches the controller from PQI mode back into SIS mode. */
8226
8227 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8228 {
8229         int rc;
8230
8231         pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8232         rc = pqi_reset(ctrl_info);
8233         if (rc)
8234                 return rc;
8235         rc = sis_reenable_sis_mode(ctrl_info);
8236         if (rc) {
8237                 dev_err(&ctrl_info->pci_dev->dev,
8238                         "re-enabling SIS mode failed with error %d\n", rc);
8239                 return rc;
8240         }
8241         pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8242
8243         return 0;
8244 }
8245
8246 /*
8247  * Force the controller into SIS mode if it isn't there already: record the
8248  * mode if the SIS firmware is up, otherwise revert via a full PQI reset.
8249  */
8250
8251 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8252 {
8253         if (!sis_is_firmware_running(ctrl_info))
8254                 return -ENXIO;
8255
8256         if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8257                 return 0;
8258
8259         if (sis_is_kernel_up(ctrl_info)) {
8260                 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8261                 return 0;
8262         }
8263
8264         return pqi_revert_to_sis_mode(ctrl_info);
8265 }
8266
8267 static void pqi_perform_lockup_action(void)
8268 {
8269         switch (pqi_lockup_action) {
8270         case PANIC:
8271                 panic("FATAL: Smart Family Controller lockup detected");
8272                 break;
8273         case REBOOT:
8274                 emergency_restart();
8275                 break;
8276         case NONE:
8277         default:
8278                 break;
8279         }
8280 }
8281
8282 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
8283 #define PQI_CTRL_LOG_MIN_SIZE   (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
8284
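/*
 * Main controller bring-up. In kdump mode (reset_devices) wait for any
 * firmware triage or controller logging in progress and issue a soft
 * reset; otherwise force the controller back into SIS mode. The controller
 * is then queried over SIS and, if all goes well, transitioned into PQI
 * mode.
 */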
8285 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8286 {
8287         int rc;
8288         u32 product_id;
8289
8290         if (reset_devices) {
8291                 if (pqi_is_fw_triage_supported(ctrl_info)) {
8292                         rc = sis_wait_for_fw_triage_completion(ctrl_info);
8293                         if (rc)
8294                                 return rc;
8295                 }
8296                 if (sis_is_ctrl_logging_supported(ctrl_info)) {
8297                         sis_notify_kdump(ctrl_info);
8298                         rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
8299                         if (rc)
8300                                 return rc;
8301                 }
8302                 sis_soft_reset(ctrl_info);
8303                 ssleep(PQI_POST_RESET_DELAY_SECS);
8304         } else {
8305                 rc = pqi_force_sis_mode(ctrl_info);
8306                 if (rc)
8307                         return rc;
8308         }
8309
8310         /*
8311          * Wait until the controller is ready to start accepting SIS
8312          * commands.
8313          */
8314         rc = sis_wait_for_ctrl_ready(ctrl_info);
8315         if (rc) {
8316                 if (reset_devices) {
8317                         dev_err(&ctrl_info->pci_dev->dev,
8318                                 "kdump init failed with error %d\n", rc);
8319                         pqi_lockup_action = REBOOT;
8320                         pqi_perform_lockup_action();
8321                 }
8322                 return rc;
8323         }
8324
8325         /*
8326          * Get the controller properties. This allows us to determine
8327          * whether it supports PQI mode.
8328          */
8329         rc = sis_get_ctrl_properties(ctrl_info);
8330         if (rc) {
8331                 dev_err(&ctrl_info->pci_dev->dev,
8332                         "error obtaining controller properties\n");
8333                 return rc;
8334         }
8335
8336         rc = sis_get_pqi_capabilities(ctrl_info);
8337         if (rc) {
8338                 dev_err(&ctrl_info->pci_dev->dev,
8339                         "error obtaining controller capabilities\n");
8340                 return rc;
8341         }
8342
8343         product_id = sis_get_product_id(ctrl_info);
8344         ctrl_info->product_id = (u8)product_id;
8345         ctrl_info->product_revision = (u8)(product_id >> 8);
8346
8347         if (reset_devices) {
8348                 if (ctrl_info->max_outstanding_requests >
8349                     PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8350                         ctrl_info->max_outstanding_requests =
8351                                 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8352         } else {
8353                 if (ctrl_info->max_outstanding_requests >
8354                     PQI_MAX_OUTSTANDING_REQUESTS)
8355                         ctrl_info->max_outstanding_requests =
8356                                 PQI_MAX_OUTSTANDING_REQUESTS;
8357         }
8358
8359         pqi_calculate_io_resources(ctrl_info);
8360
8361         rc = pqi_alloc_error_buffer(ctrl_info);
8362         if (rc) {
8363                 dev_err(&ctrl_info->pci_dev->dev,
8364                         "failed to allocate PQI error buffer\n");
8365                 return rc;
8366         }
8367
8368         /*
8369          * If the function we are about to call succeeds, the
8370          * controller will transition from legacy SIS mode
8371          * into PQI mode.
8372          */
8373         rc = sis_init_base_struct_addr(ctrl_info);
8374         if (rc) {
8375                 dev_err(&ctrl_info->pci_dev->dev,
8376                         "error initializing PQI mode\n");
8377                 return rc;
8378         }
8379
8380         /* Wait for the controller to complete the SIS -> PQI transition. */
8381         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8382         if (rc) {
8383                 dev_err(&ctrl_info->pci_dev->dev,
8384                         "transition to PQI mode failed\n");
8385                 return rc;
8386         }
8387
8388         /* From here on, we are running in PQI mode. */
8389         ctrl_info->pqi_mode_enabled = true;
8390         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8391
8392         rc = pqi_alloc_admin_queues(ctrl_info);
8393         if (rc) {
8394                 dev_err(&ctrl_info->pci_dev->dev,
8395                         "failed to allocate admin queues\n");
8396                 return rc;
8397         }
8398
8399         rc = pqi_create_admin_queues(ctrl_info);
8400         if (rc) {
8401                 dev_err(&ctrl_info->pci_dev->dev,
8402                         "error creating admin queues\n");
8403                 return rc;
8404         }
8405
8406         rc = pqi_report_device_capability(ctrl_info);
8407         if (rc) {
8408                 dev_err(&ctrl_info->pci_dev->dev,
8409                         "obtaining device capability failed\n");
8410                 return rc;
8411         }
8412
8413         rc = pqi_validate_device_capability(ctrl_info);
8414         if (rc)
8415                 return rc;
8416
8417         pqi_calculate_queue_resources(ctrl_info);
8418
8419         rc = pqi_enable_msix_interrupts(ctrl_info);
8420         if (rc)
8421                 return rc;
8422
8423         if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8424                 ctrl_info->max_msix_vectors =
8425                         ctrl_info->num_msix_vectors_enabled;
8426                 pqi_calculate_queue_resources(ctrl_info);
8427         }
8428
8429         rc = pqi_alloc_io_resources(ctrl_info);
8430         if (rc)
8431                 return rc;
8432
8433         rc = pqi_alloc_operational_queues(ctrl_info);
8434         if (rc) {
8435                 dev_err(&ctrl_info->pci_dev->dev,
8436                         "failed to allocate operational queues\n");
8437                 return rc;
8438         }
8439
8440         pqi_init_operational_queues(ctrl_info);
8441
8442         rc = pqi_create_queues(ctrl_info);
8443         if (rc)
8444                 return rc;
8445
8446         rc = pqi_request_irqs(ctrl_info);
8447         if (rc)
8448                 return rc;
8449
8450         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8451
8452         ctrl_info->controller_online = true;
8453
8454         rc = pqi_process_config_table(ctrl_info);
8455         if (rc)
8456                 return rc;
8457
8458         pqi_start_heartbeat_timer(ctrl_info);
8459
8460         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8461                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8462                 if (rc) { /* Supported features not returned correctly. */
8463                         dev_err(&ctrl_info->pci_dev->dev,
8464                                 "error obtaining advanced RAID bypass configuration\n");
8465                         return rc;
8466                 }
8467                 ctrl_info->ciss_report_log_flags |=
8468                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8469         }
8470
8471         rc = pqi_enable_events(ctrl_info);
8472         if (rc) {
8473                 dev_err(&ctrl_info->pci_dev->dev,
8474                         "error enabling events\n");
8475                 return rc;
8476         }
8477
8478         /* Register with the SCSI subsystem. */
8479         rc = pqi_register_scsi(ctrl_info);
8480         if (rc)
8481                 return rc;
8482
8483         if (ctrl_info->ctrl_logging_supported && !reset_devices) {
8484                 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
8485                 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8486         }
8487
8488         rc = pqi_get_ctrl_product_details(ctrl_info);
8489         if (rc) {
8490                 dev_err(&ctrl_info->pci_dev->dev,
8491                         "error obtaining product details\n");
8492                 return rc;
8493         }
8494
8495         rc = pqi_get_ctrl_serial_number(ctrl_info);
8496         if (rc) {
8497                 dev_err(&ctrl_info->pci_dev->dev,
8498                         "error obtaining ctrl serial number\n");
8499                 return rc;
8500         }
8501
8502         rc = pqi_set_diag_rescan(ctrl_info);
8503         if (rc) {
8504                 dev_err(&ctrl_info->pci_dev->dev,
8505                         "error enabling multi-lun rescan\n");
8506                 return rc;
8507         }
8508
8509         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8510         if (rc) {
8511                 dev_err(&ctrl_info->pci_dev->dev,
8512                         "error updating host wellness\n");
8513                 return rc;
8514         }
8515
8516         pqi_schedule_update_time_worker(ctrl_info);
8517
8518         pqi_scan_scsi_devices(ctrl_info);
8519
8520         return 0;
8521 }
8522
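/*
 * Zero the driver's cached queue indices and their hardware-visible
 * counterparts so the existing queue memory can be reused after a
 * controller restart.
 */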
8523 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
8524 {
8525         unsigned int i;
8526         struct pqi_admin_queues *admin_queues;
8527         struct pqi_event_queue *event_queue;
8528
8529         admin_queues = &ctrl_info->admin_queues;
8530         admin_queues->iq_pi_copy = 0;
8531         admin_queues->oq_ci_copy = 0;
8532         writel(0, admin_queues->oq_pi);
8533
8534         for (i = 0; i < ctrl_info->num_queue_groups; i++) {
8535                 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
8536                 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
8537                 ctrl_info->queue_groups[i].oq_ci_copy = 0;
8538
8539                 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
8540                 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
8541                 writel(0, ctrl_info->queue_groups[i].oq_pi);
8542         }
8543
8544         event_queue = &ctrl_info->event_queue;
8545         writel(0, event_queue->oq_pi);
8546         event_queue->oq_ci_copy = 0;
8547 }
8548
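/*
 * Re-initialize a controller whose resources are already allocated (resume
 * from suspend or restart after OFA); mirrors pqi_ctrl_init() but skips
 * the allocation steps.
 */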
8549 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
8550 {
8551         int rc;
8552
8553         rc = pqi_force_sis_mode(ctrl_info);
8554         if (rc)
8555                 return rc;
8556
8557         /*
8558          * Wait until the controller is ready to start accepting SIS
8559          * commands.
8560          */
8561         rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
8562         if (rc)
8563                 return rc;
8564
8565         /*
8566          * Get the controller properties. This allows us to determine
8567          * whether it supports PQI mode.
8568          */
8569         rc = sis_get_ctrl_properties(ctrl_info);
8570         if (rc) {
8571                 dev_err(&ctrl_info->pci_dev->dev,
8572                         "error obtaining controller properties\n");
8573                 return rc;
8574         }
8575
8576         rc = sis_get_pqi_capabilities(ctrl_info);
8577         if (rc) {
8578                 dev_err(&ctrl_info->pci_dev->dev,
8579                         "error obtaining controller capabilities\n");
8580                 return rc;
8581         }
8582
8583         /*
8584          * If the function we are about to call succeeds, the
8585          * controller will transition from legacy SIS mode
8586          * into PQI mode.
8587          */
8588         rc = sis_init_base_struct_addr(ctrl_info);
8589         if (rc) {
8590                 dev_err(&ctrl_info->pci_dev->dev,
8591                         "error initializing PQI mode\n");
8592                 return rc;
8593         }
8594
8595         /* Wait for the controller to complete the SIS -> PQI transition. */
8596         rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8597         if (rc) {
8598                 dev_err(&ctrl_info->pci_dev->dev,
8599                         "transition to PQI mode failed\n");
8600                 return rc;
8601         }
8602
8603         /* From here on, we are running in PQI mode. */
8604         ctrl_info->pqi_mode_enabled = true;
8605         pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8606
8607         pqi_reinit_queues(ctrl_info);
8608
8609         rc = pqi_create_admin_queues(ctrl_info);
8610         if (rc) {
8611                 dev_err(&ctrl_info->pci_dev->dev,
8612                         "error creating admin queues\n");
8613                 return rc;
8614         }
8615
8616         rc = pqi_create_queues(ctrl_info);
8617         if (rc)
8618                 return rc;
8619
8620         pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8621
8622         ctrl_info->controller_online = true;
8623         pqi_ctrl_unblock_requests(ctrl_info);
8624
8625         pqi_ctrl_reset_config(ctrl_info);
8626
8627         rc = pqi_process_config_table(ctrl_info);
8628         if (rc)
8629                 return rc;
8630
8631         pqi_start_heartbeat_timer(ctrl_info);
8632
8633         if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8634                 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8635                 if (rc) {
8636                         dev_err(&ctrl_info->pci_dev->dev,
8637                                 "error obtaining advanced RAID bypass configuration\n");
8638                         return rc;
8639                 }
8640                 ctrl_info->ciss_report_log_flags |=
8641                         CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8642         }
8643
8644         rc = pqi_enable_events(ctrl_info);
8645         if (rc) {
8646                 dev_err(&ctrl_info->pci_dev->dev,
8647                         "error enabling events\n");
8648                 return rc;
8649         }
8650
8651         rc = pqi_get_ctrl_product_details(ctrl_info);
8652         if (rc) {
8653                 dev_err(&ctrl_info->pci_dev->dev,
8654                         "error obtaining product details\n");
8655                 return rc;
8656         }
8657
8658         rc = pqi_set_diag_rescan(ctrl_info);
8659         if (rc) {
8660                 dev_err(&ctrl_info->pci_dev->dev,
8661                         "error enabling multi-lun rescan\n");
8662                 return rc;
8663         }
8664
8665         rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8666         if (rc) {
8667                 dev_err(&ctrl_info->pci_dev->dev,
8668                         "error updating host wellness\n");
8669                 return rc;
8670         }
8671
8672         if (pqi_ofa_in_progress(ctrl_info)) {
8673                 pqi_ctrl_unblock_scan(ctrl_info);
8674                 if (ctrl_info->ctrl_logging_supported) {
8675                         if (!ctrl_info->ctrl_log_memory.host_memory)
8676                                 pqi_host_setup_buffer(ctrl_info,
8677                                         &ctrl_info->ctrl_log_memory,
8678                                         PQI_CTRL_LOG_TOTAL_SIZE,
8679                                         PQI_CTRL_LOG_MIN_SIZE);
8680                         pqi_host_memory_update(ctrl_info,
8681                                 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8682                 } else {
8683                         if (ctrl_info->ctrl_log_memory.host_memory)
8684                                 pqi_host_free_buffer(ctrl_info,
8685                                         &ctrl_info->ctrl_log_memory);
8686                 }
8687         }
8688
8689         pqi_scan_scsi_devices(ctrl_info);
8690
8691         return 0;
8692 }
8693
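/* Program the completion timeout range in the PCIe Device Control 2 register. */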
8694 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
8695 {
8696         int rc;
8697
8698         rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
8699                 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
8700
8701         return pcibios_err_to_errno(rc);
8702 }
8703
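/*
 * Enable the PCI device, set the DMA mask, map BAR 0, raise the PCIe
 * completion timeout, and enable bus mastering.
 */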
8704 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
8705 {
8706         int rc;
8707         u64 mask;
8708
8709         rc = pci_enable_device(ctrl_info->pci_dev);
8710         if (rc) {
8711                 dev_err(&ctrl_info->pci_dev->dev,
8712                         "failed to enable PCI device\n");
8713                 return rc;
8714         }
8715
8716         if (sizeof(dma_addr_t) > 4)
8717                 mask = DMA_BIT_MASK(64);
8718         else
8719                 mask = DMA_BIT_MASK(32);
8720
8721         rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
8722         if (rc) {
8723                 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
8724                 goto disable_device;
8725         }
8726
8727         rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
8728         if (rc) {
8729                 dev_err(&ctrl_info->pci_dev->dev,
8730                         "failed to obtain PCI resources\n");
8731                 goto disable_device;
8732         }
8733
8734         ctrl_info->iomem_base = ioremap(pci_resource_start(
8735                 ctrl_info->pci_dev, 0),
8736                 pci_resource_len(ctrl_info->pci_dev, 0));
8737         if (!ctrl_info->iomem_base) {
8738                 dev_err(&ctrl_info->pci_dev->dev,
8739                         "failed to map memory for controller registers\n");
8740                 rc = -ENOMEM;
8741                 goto release_regions;
8742         }
8743
8744 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS               0x6
8745
8746         /* Increase the PCIe completion timeout. */
8747         rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
8748                 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
8749         if (rc) {
8750                 dev_err(&ctrl_info->pci_dev->dev,
8751                         "failed to set PCIe completion timeout\n");
8752                 goto release_regions;
8753         }
8754
8755         /* Enable bus mastering. */
8756         pci_set_master(ctrl_info->pci_dev);
8757
8758         ctrl_info->registers = ctrl_info->iomem_base;
8759         ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
8760
8761         pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
8762
8763         return 0;
8764
8765 release_regions:
8766         pci_release_regions(ctrl_info->pci_dev);
8767 disable_device:
8768         pci_disable_device(ctrl_info->pci_dev);
8769
8770         return rc;
8771 }
8772
8773 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
8774 {
8775         iounmap(ctrl_info->iomem_base);
8776         pci_release_regions(ctrl_info->pci_dev);
8777         if (pci_is_enabled(ctrl_info->pci_dev))
8778                 pci_disable_device(ctrl_info->pci_dev);
8779         pci_set_drvdata(ctrl_info->pci_dev, NULL);
8780 }
8781
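/*
 * Allocate the per-controller state on the requested NUMA node and set up
 * its locks, work items, timers, and tunable defaults.
 */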
8782 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
8783 {
8784         struct pqi_ctrl_info *ctrl_info;
8785
8786         ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
8787                         GFP_KERNEL, numa_node);
8788         if (!ctrl_info)
8789                 return NULL;
8790
8791         mutex_init(&ctrl_info->scan_mutex);
8792         mutex_init(&ctrl_info->lun_reset_mutex);
8793         mutex_init(&ctrl_info->ofa_mutex);
8794
8795         INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
8796         spin_lock_init(&ctrl_info->scsi_device_list_lock);
8797
8798         INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
8799         atomic_set(&ctrl_info->num_interrupts, 0);
8800
8801         INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
8802         INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
8803
8804         timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
8805         INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
8806
8807         INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
8808         INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);
8809
8810         sema_init(&ctrl_info->sync_request_sem,
8811                 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
8812         init_waitqueue_head(&ctrl_info->block_requests_wait);
8813
8814         ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
8815         ctrl_info->irq_mode = IRQ_MODE_NONE;
8816         ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
8817
8818         ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
8819         ctrl_info->max_transfer_encrypted_sas_sata =
8820                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
8821         ctrl_info->max_transfer_encrypted_nvme =
8822                 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
8823         ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
8824         ctrl_info->max_write_raid_1_10_2drive = ~0;
8825         ctrl_info->max_write_raid_1_10_3drive = ~0;
8826         ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;
8827
8828         return ctrl_info;
8829 }
8830
8831 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
8832 {
8833         kfree(ctrl_info);
8834 }
8835
8836 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
8837 {
8838         pqi_free_irqs(ctrl_info);
8839         pqi_disable_msix_interrupts(ctrl_info);
8840 }
8841
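/*
 * Undo pqi_ctrl_init(): free the interrupts, the DMA-coherent queue and
 * error buffers, the I/O request pool, the PCI resources, and finally the
 * controller structure itself.
 */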
8842 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
8843 {
8844         pqi_free_interrupts(ctrl_info);
8845         if (ctrl_info->queue_memory_base)
8846                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8847                         ctrl_info->queue_memory_length,
8848                         ctrl_info->queue_memory_base,
8849                         ctrl_info->queue_memory_base_dma_handle);
8850         if (ctrl_info->admin_queue_memory_base)
8851                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8852                         ctrl_info->admin_queue_memory_length,
8853                         ctrl_info->admin_queue_memory_base,
8854                         ctrl_info->admin_queue_memory_base_dma_handle);
8855         pqi_free_all_io_requests(ctrl_info);
8856         if (ctrl_info->error_buffer)
8857                 dma_free_coherent(&ctrl_info->pci_dev->dev,
8858                         ctrl_info->error_buffer_length,
8859                         ctrl_info->error_buffer,
8860                         ctrl_info->error_buffer_dma_handle);
8861         if (ctrl_info->iomem_base)
8862                 pqi_cleanup_pci_init(ctrl_info);
8863         pqi_free_ctrl_info(ctrl_info);
8864 }
8865
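/*
 * Tear down a controller: stop background work, fail any outstanding
 * requests on surprise removal, unregister from the SCSI midlayer, drop
 * back to SIS mode if we are still in PQI mode, and free all resources.
 */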
8866 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
8867 {
8868         ctrl_info->controller_online = false;
8869         pqi_stop_heartbeat_timer(ctrl_info);
8870         pqi_ctrl_block_requests(ctrl_info);
8871         pqi_cancel_rescan_worker(ctrl_info);
8872         pqi_cancel_update_time_worker(ctrl_info);
8873         if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
8874                 pqi_fail_all_outstanding_requests(ctrl_info);
8875                 ctrl_info->pqi_mode_enabled = false;
8876         }
8877         pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
8878         pqi_unregister_scsi(ctrl_info);
8879         if (ctrl_info->pqi_mode_enabled)
8880                 pqi_revert_to_sis_mode(ctrl_info);
8881         pqi_free_ctrl_resources(ctrl_info);
8882 }
8883
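/*
 * Quiesce all driver activity before Online Firmware Activation (OFA);
 * pqi_ofa_ctrl_unquiesce() reverses these steps.
 */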
8884 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
8885 {
8886         pqi_ctrl_block_scan(ctrl_info);
8887         pqi_scsi_block_requests(ctrl_info);
8888         pqi_ctrl_block_device_reset(ctrl_info);
8889         pqi_ctrl_block_requests(ctrl_info);
8890         pqi_ctrl_wait_until_quiesced(ctrl_info);
8891         pqi_stop_heartbeat_timer(ctrl_info);
8892 }
8893
8894 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
8895 {
8896         pqi_start_heartbeat_timer(ctrl_info);
8897         pqi_ctrl_unblock_requests(ctrl_info);
8898         pqi_ctrl_unblock_device_reset(ctrl_info);
8899         pqi_scsi_unblock_requests(ctrl_info);
8900         pqi_ctrl_unblock_scan(ctrl_info);
8901 }
8902
8903 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
8904 {
8905         ssleep(delay_secs);
8906
8907         return pqi_ctrl_init_resume(ctrl_info);
8908 }
8909
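/*
 * Allocate the host buffer as sg_count DMA-coherent chunks of chunk_size
 * bytes each, recording every chunk in the host_memory SG descriptor list
 * and flagging the final descriptor with CISS_SG_LAST.
 */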
8910 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
8911         struct pqi_host_memory_descriptor *host_memory_descriptor,
8912         u32 total_size, u32 chunk_size)
8913 {
8914         int i;
8915         u32 sg_count;
8916         struct device *dev;
8917         struct pqi_host_memory *host_memory;
8918         struct pqi_sg_descriptor *mem_descriptor;
8919         dma_addr_t dma_handle;
8920
8921         sg_count = DIV_ROUND_UP(total_size, chunk_size);
8922         if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
8923                 goto out;
8924
8925         host_memory_descriptor->host_chunk_virt_address = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL);
8926         if (!host_memory_descriptor->host_chunk_virt_address)
8927                 goto out;
8928
8929         dev = &ctrl_info->pci_dev->dev;
8930         host_memory = host_memory_descriptor->host_memory;
8931
8932         for (i = 0; i < sg_count; i++) {
8933                 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
8934                 if (!host_memory_descriptor->host_chunk_virt_address[i])
8935                         goto out_free_chunks;
8936                 mem_descriptor = &host_memory->sg_descriptor[i];
8937                 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
8938                 put_unaligned_le32(chunk_size, &mem_descriptor->length);
8939         }
8940
8941         put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
8942         put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
8943         put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);
8944
8945         return 0;
8946
8947 out_free_chunks:
8948         while (--i >= 0) {
8949                 mem_descriptor = &host_memory->sg_descriptor[i];
8950                 dma_free_coherent(dev, chunk_size,
8951                         host_memory_descriptor->host_chunk_virt_address[i],
8952                         get_unaligned_le64(&mem_descriptor->address));
8953         }
8954         kfree(host_memory_descriptor->host_chunk_virt_address);
8955 out:
8956         return -ENOMEM;
8957 }
8958
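/*
 * Try progressively smaller layouts: halve the chunk size, then the total
 * size, until an allocation succeeds or the total would drop below
 * min_required_size. All sizes are kept page-aligned.
 */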
8959 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
8960         struct pqi_host_memory_descriptor *host_memory_descriptor,
8961         u32 total_required_size, u32 min_required_size)
8962 {
8963         u32 chunk_size;
8964         u32 min_chunk_size;
8965
8966         if (total_required_size == 0 || min_required_size == 0)
8967                 return 0;
8968
8969         total_required_size = PAGE_ALIGN(total_required_size);
8970         min_required_size = PAGE_ALIGN(min_required_size);
8971         min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
8972         min_chunk_size = PAGE_ALIGN(min_chunk_size);
8973
8974         while (total_required_size >= min_required_size) {
8975                 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
8976                         if (pqi_host_alloc_mem(ctrl_info,
8977                                 host_memory_descriptor, total_required_size,
8978                                 chunk_size) == 0)
8979                                 return 0;
8980                         chunk_size /= 2;
8981                         chunk_size = PAGE_ALIGN(chunk_size);
8982                 }
8983                 total_required_size /= 2;
8984                 total_required_size = PAGE_ALIGN(total_required_size);
8985         }
8986
8987         return -ENOMEM;
8988 }
8989
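/*
 * Allocate the descriptor structure and a firmware-usable host buffer.
 * Typical lifecycle, as used for the controller log buffer:
 *
 *      pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory,
 *              PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
 *      pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory,
 *              PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
 *      ...
 *      pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
 */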
8990 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
8991         struct pqi_host_memory_descriptor *host_memory_descriptor,
8992         u32 total_size, u32 min_size)
8993 {
8994         struct device *dev;
8995         struct pqi_host_memory *host_memory;
8996
8997         dev = &ctrl_info->pci_dev->dev;
8998
8999         host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
9000                 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
9001         if (!host_memory)
9002                 return;
9003
9004         host_memory_descriptor->host_memory = host_memory;
9005
9006         if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
9007                 total_size, min_size) < 0) {
9008                 dev_err(dev, "failed to allocate firmware usable host buffer\n");
9009                 dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9010                         host_memory_descriptor->host_memory_dma_handle);
9011                 host_memory_descriptor->host_memory = NULL;
9012                 return;
9013         }
9014 }
9015
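/*
 * Free each chunk named by the SG descriptors, then the descriptor
 * structure itself, and clear the host_memory pointer.
 */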
9016 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
9017         struct pqi_host_memory_descriptor *host_memory_descriptor)
9018 {
9019         unsigned int i;
9020         struct device *dev;
9021         struct pqi_host_memory *host_memory;
9022         struct pqi_sg_descriptor *mem_descriptor;
9023         unsigned int num_memory_descriptors;
9024
9025         host_memory = host_memory_descriptor->host_memory;
9026         if (!host_memory)
9027                 return;
9028
9029         dev = &ctrl_info->pci_dev->dev;
9030
9031         if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
9032                 goto out;
9033
9034         mem_descriptor = host_memory->sg_descriptor;
9035         num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);
9036
9037         for (i = 0; i < num_memory_descriptors; i++) {
9038                 dma_free_coherent(dev,
9039                         get_unaligned_le32(&mem_descriptor[i].length),
9040                         host_memory_descriptor->host_chunk_virt_address[i],
9041                         get_unaligned_le64(&mem_descriptor[i].address));
9042         }
9043         kfree(host_memory_descriptor->host_chunk_virt_address);
9044
9045 out:
9046         dma_free_coherent(dev, sizeof(*host_memory), host_memory,
9047                 host_memory_descriptor->host_memory_dma_handle);
9048         host_memory_descriptor->host_memory = NULL;
9049 }
9050
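/*
 * Send a vendor-general request telling the firmware where the host buffer
 * lives; function_code selects OFA versus controller-log usage and
 * determines which version/signature is stamped into the buffer header.
 * With no buffer allocated, a zeroed buffer address is reported.
 */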
9051 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
9052         struct pqi_host_memory_descriptor *host_memory_descriptor,
9053         u16 function_code)
9054 {
9055         u32 buffer_length;
9056         struct pqi_vendor_general_request request;
9057         struct pqi_host_memory *host_memory;
9058
9059         memset(&request, 0, sizeof(request));
9060
9061         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
9062         put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
9063         put_unaligned_le16(function_code, &request.function_code);
9064
9065         host_memory = host_memory_descriptor->host_memory;
9066
9067         if (host_memory) {
9068                 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
9069                 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
9070                 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);
9071
9072                 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
9073                         put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
9074                         memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
9075                 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
9076                         put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
9077                         memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
9078                 }
9079         }
9080
9081         return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
9082 }
9083
9084 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
9085         .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
9086         .status = SAM_STAT_CHECK_CONDITION,
9087 };
9088
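/*
 * Complete every in-flight request with an error once the controller is
 * offline: SCSI commands get DID_NO_CONNECT, driver-internal requests get
 * -ENXIO plus a synthetic CHECK CONDITION.
 */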
9089 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
9090 {
9091         unsigned int i;
9092         struct pqi_io_request *io_request;
9093         struct scsi_cmnd *scmd;
9094         struct scsi_device *sdev;
9095
9096         for (i = 0; i < ctrl_info->max_io_slots; i++) {
9097                 io_request = &ctrl_info->io_request_pool[i];
9098                 if (atomic_read(&io_request->refcount) == 0)
9099                         continue;
9100
9101                 scmd = io_request->scmd;
9102                 if (scmd) {
9103                         sdev = scmd->device;
9104                         if (!sdev || !scsi_device_online(sdev)) {
9105                         if (!sdev || !scsi_device_online(sdev)) {
9106                                 pqi_free_io_request(io_request);
9107                                 continue;
9108                         }
9109                         set_host_byte(scmd, DID_NO_CONNECT);
9111                         io_request->status = -ENXIO;
9112                         io_request->error_info =
9113                                 &pqi_ctrl_offline_raid_error_info;
9114                 }
9115
9116                 io_request->io_complete_callback(io_request,
9117                         io_request->context);
9118         }
9119 }
9120
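/*
 * Deferred half of taking a controller offline; runs from a work item so
 * it can sleep while quiescing and failing outstanding requests.
 */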
9121 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
9122 {
9123         pqi_perform_lockup_action();
9124         pqi_stop_heartbeat_timer(ctrl_info);
9125         pqi_free_interrupts(ctrl_info);
9126         pqi_cancel_rescan_worker(ctrl_info);
9127         pqi_cancel_update_time_worker(ctrl_info);
9128         pqi_ctrl_wait_until_quiesced(ctrl_info);
9129         pqi_fail_all_outstanding_requests(ctrl_info);
9130         pqi_ctrl_unblock_requests(ctrl_info);
9131 }
9132
9133 static void pqi_ctrl_offline_worker(struct work_struct *work)
9134 {
9135         struct pqi_ctrl_info *ctrl_info;
9136
9137         ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
9138         pqi_take_ctrl_offline_deferred(ctrl_info);
9139 }
9140
9141 static const char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9142 {
9143         const char *string;
9144
9145         switch (ctrl_shutdown_reason) {
9146         case PQI_IQ_NOT_DRAINED_TIMEOUT:
9147                 string = "inbound queue not drained timeout";
9148                 break;
9149         case PQI_LUN_RESET_TIMEOUT:
9150                 string = "LUN reset timeout";
9151                 break;
9152         case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
9153                 string = "I/O pending timeout after LUN reset";
9154                 break;
9155         case PQI_NO_HEARTBEAT:
9156                 string = "no controller heartbeat detected";
9157                 break;
9158         case PQI_FIRMWARE_KERNEL_NOT_UP:
9159                 string = "firmware kernel not ready";
9160                 break;
9161         case PQI_OFA_RESPONSE_TIMEOUT:
9162                 string = "OFA response timeout";
9163                 break;
9164         case PQI_INVALID_REQ_ID:
9165                 string = "invalid request ID";
9166                 break;
9167         case PQI_UNMATCHED_REQ_ID:
9168                 string = "unmatched request ID";
9169                 break;
9170         case PQI_IO_PI_OUT_OF_RANGE:
9171                 string = "I/O queue producer index out of range";
9172                 break;
9173         case PQI_EVENT_PI_OUT_OF_RANGE:
9174                 string = "event queue producer index out of range";
9175                 break;
9176         case PQI_UNEXPECTED_IU_TYPE:
9177                 string = "unexpected IU type";
9178                 break;
9179         default:
9180                 string = "unknown reason";
9181                 break;
9182         }
9183
9184         return string;
9185 }
9186
9187 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
9188         enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
9189 {
9190         if (!ctrl_info->controller_online)
9191                 return;
9192
9193         ctrl_info->controller_online = false;
9194         ctrl_info->pqi_mode_enabled = false;
9195         pqi_ctrl_block_requests(ctrl_info);
9196         if (!pqi_disable_ctrl_shutdown)
9197                 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
9198         pci_disable_device(ctrl_info->pci_dev);
9199         dev_err(&ctrl_info->pci_dev->dev,
9200                 "controller offline: reason code 0x%x (%s)\n",
9201                 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
9202         schedule_work(&ctrl_info->ctrl_offline_work);
9203 }
9204
9205 static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
9206         const struct pci_device_id *id)
9207 {
9208         char *ctrl_description;
9209
9210         if (id->driver_data)
9211                 ctrl_description = (char *)id->driver_data;
9212         else
9213                 ctrl_description = "Microchip Smart Family Controller";
9214
9215         dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
9216 }
9217
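/*
 * PCI probe entry point: resolve the NUMA node, allocate the controller
 * info block, then run PCI and controller initialization.
 */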
9218 static int pqi_pci_probe(struct pci_dev *pci_dev,
9219         const struct pci_device_id *id)
9220 {
9221         int rc;
9222         int node;
9223         struct pqi_ctrl_info *ctrl_info;
9224
9225         pqi_print_ctrl_info(pci_dev, id);
9226
9227         if (pqi_disable_device_id_wildcards &&
9228                 id->subvendor == PCI_ANY_ID &&
9229                 id->subdevice == PCI_ANY_ID) {
9230                 dev_warn(&pci_dev->dev,
9231                         "controller not probed because device ID wildcards are disabled\n");
9232                 return -ENODEV;
9233         }
9234
9235         if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
9236                 dev_warn(&pci_dev->dev,
9237                         "controller device ID matched using wildcards\n");
9238
9239         node = dev_to_node(&pci_dev->dev);
9240         if (node == NUMA_NO_NODE) {
9241                 node = cpu_to_node(0);
9242                 if (node == NUMA_NO_NODE)
9243                         node = 0;
9244                 set_dev_node(&pci_dev->dev, node);
9245         }
9246
9247         ctrl_info = pqi_alloc_ctrl_info(node);
9248         if (!ctrl_info) {
9249                 dev_err(&pci_dev->dev,
9250                         "failed to allocate controller info block\n");
9251                 return -ENOMEM;
9252         }
9253         ctrl_info->numa_node = node;
9254
9255         ctrl_info->pci_dev = pci_dev;
9256
9257         rc = pqi_pci_init(ctrl_info);
9258         if (rc)
9259                 goto error;
9260
9261         rc = pqi_ctrl_init(ctrl_info);
9262         if (rc)
9263                 goto error;
9264
9265         return 0;
9266
9267 error:
9268         pqi_remove_ctrl(ctrl_info);
9269
9270         return rc;
9271 }
9272
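/*
 * PCI remove entry point. A subsystem vendor ID of 0xffff means the device
 * has already vanished (surprise removal), in which case the cache flush
 * is skipped.
 */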
9273 static void pqi_pci_remove(struct pci_dev *pci_dev)
9274 {
9275         struct pqi_ctrl_info *ctrl_info;
9276         u16 vendor_id;
9277         int rc;
9278
9279         ctrl_info = pci_get_drvdata(pci_dev);
9280         if (!ctrl_info)
9281                 return;
9282
9283         pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
9284         if (vendor_id == 0xffff)
9285                 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
9286         else
9287                 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;
9288
9289         if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
9290                 rc = pqi_flush_cache(ctrl_info, RESTART);
9291                 if (rc)
9292                         dev_err(&pci_dev->dev,
9293                                 "unable to flush controller cache during remove\n");
9294         }
9295
9296         pqi_remove_ctrl(ctrl_info);
9297 }
9298
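/*
 * Shutdown-time sanity check: for each request still outstanding, exactly
 * one of the two WARN_ON()s fires, identifying whether it was a SCSI
 * midlayer command or a driver-internal request.
 */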
9299 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
9300 {
9301         unsigned int i;
9302         struct pqi_io_request *io_request;
9303         struct scsi_cmnd *scmd;
9304
9305         for (i = 0; i < ctrl_info->max_io_slots; i++) {
9306                 io_request = &ctrl_info->io_request_pool[i];
9307                 if (atomic_read(&io_request->refcount) == 0)
9308                         continue;
9309                 scmd = io_request->scmd;
9310                 WARN_ON(scmd != NULL); /* I/O command from the SCSI midlayer */
9311                 WARN_ON(scmd == NULL); /* non-I/O or driver-initiated command */
9312         }
9313 }
9314
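/*
 * Quiesce the controller and flush its battery-backed cache before the
 * system restarts or powers off, then reset it.
 */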
9315 static void pqi_shutdown(struct pci_dev *pci_dev)
9316 {
9317         int rc;
9318         struct pqi_ctrl_info *ctrl_info;
9319         enum bmic_flush_cache_shutdown_event shutdown_event;
9320
9321         ctrl_info = pci_get_drvdata(pci_dev);
9322         if (!ctrl_info) {
9323                 dev_err(&pci_dev->dev,
9324                         "cache could not be flushed\n");
9325                 return;
9326         }
9327
9328         pqi_wait_until_ofa_finished(ctrl_info);
9329
9330         pqi_scsi_block_requests(ctrl_info);
9331         pqi_ctrl_block_device_reset(ctrl_info);
9332         pqi_ctrl_block_requests(ctrl_info);
9333         pqi_ctrl_wait_until_quiesced(ctrl_info);
9334
9335         if (system_state == SYSTEM_RESTART)
9336                 shutdown_event = RESTART;
9337         else
9338                 shutdown_event = SHUTDOWN;
9339
9340         /*
9341          * Write all data in the controller's battery-backed cache to
9342          * storage.
9343          */
9344         rc = pqi_flush_cache(ctrl_info, shutdown_event);
9345         if (rc)
9346                 dev_err(&pci_dev->dev,
9347                         "unable to flush controller cache during shutdown\n");
9348
9349         pqi_crash_if_pending_command(ctrl_info);
9350         pqi_reset(ctrl_info);
9351 }
9352
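/* Parse the lockup_action module parameter into pqi_lockup_action. */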
9353 static void pqi_process_lockup_action_param(void)
9354 {
9355         unsigned int i;
9356
9357         if (!pqi_lockup_action_param)
9358                 return;
9359
9360         for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
9361                 if (strcmp(pqi_lockup_action_param,
9362                         pqi_lockup_actions[i].name) == 0) {
9363                         pqi_lockup_action = pqi_lockup_actions[i].action;
9364                         return;
9365                 }
9366         }
9367
9368         pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
9369                 DRIVER_NAME_SHORT, pqi_lockup_action_param);
9370 }
9371
9372 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS           30
9373 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS           (30 * 60)
9374
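/*
 * Clamp a nonzero ctrl_ready_timeout module parameter to the supported
 * 30..1800 second range and pass it through to the SIS layer.
 */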
9375 static void pqi_process_ctrl_ready_timeout_param(void)
9376 {
9377         if (pqi_ctrl_ready_timeout_secs == 0)
9378                 return;
9379
9380         if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
9381                 pr_warn("%s: ctrl_ready_timeout parameter of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
9382                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
9383                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
9384         } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
9385                 pr_warn("%s: ctrl_ready_timeout parameter of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
9386                         DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
9387                 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
9388         }
9389
9390         sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
9391 }
9392
9393 static void pqi_process_module_params(void)
9394 {
9395         pqi_process_lockup_action_param();
9396         pqi_process_ctrl_ready_timeout_param();
9397 }
9398
9399 #if defined(CONFIG_PM)
9400
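/*
 * One Adaptec-based design (subsystem device 0x1304) is special-cased to
 * send a RESTART flush event on suspend; all other controllers use SUSPEND.
 */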
9401 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
9402 {
9403         if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
9404                 return RESTART;
9405
9406         return SUSPEND;
9407 }
9408
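/*
 * Common PM handler for suspend and freeze: quiesce all activity, flush
 * the cache (suspend only), stop the heartbeat timer, and release the
 * IRQs so they can be re-requested on resume.
 */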
9409 static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
9410 {
9411         struct pci_dev *pci_dev;
9412         struct pqi_ctrl_info *ctrl_info;
9413
9414         pci_dev = to_pci_dev(dev);
9415         ctrl_info = pci_get_drvdata(pci_dev);
9416
9417         pqi_wait_until_ofa_finished(ctrl_info);
9418
9419         pqi_ctrl_block_scan(ctrl_info);
9420         pqi_scsi_block_requests(ctrl_info);
9421         pqi_ctrl_block_device_reset(ctrl_info);
9422         pqi_ctrl_block_requests(ctrl_info);
9423         pqi_ctrl_wait_until_quiesced(ctrl_info);
9424
9425         if (suspend) {
9426                 enum bmic_flush_cache_shutdown_event shutdown_event;
9427
9428                 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9429                 pqi_flush_cache(ctrl_info, shutdown_event);
9430         }
9431
9432         pqi_stop_heartbeat_timer(ctrl_info);
9433         pqi_crash_if_pending_command(ctrl_info);
9434         pqi_free_irqs(ctrl_info);
9435
9436         ctrl_info->controller_online = false;
9437         ctrl_info->pqi_mode_enabled = false;
9438
9439         return 0;
9440 }
9441
9442 static __maybe_unused int pqi_suspend(struct device *dev)
9443 {
9444         return pqi_suspend_or_freeze(dev, true);
9445 }
9446
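/*
 * Shared resume/restore handler: re-request the IRQs, unblock all activity,
 * then rerun the resume initialization path.
 */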
9447 static int pqi_resume_or_restore(struct device *dev)
9448 {
9449         int rc;
9450         struct pci_dev *pci_dev;
9451         struct pqi_ctrl_info *ctrl_info;
9452
9453         pci_dev = to_pci_dev(dev);
9454         ctrl_info = pci_get_drvdata(pci_dev);
9455
9456         rc = pqi_request_irqs(ctrl_info);
9457         if (rc)
9458                 return rc;
9459
9460         pqi_ctrl_unblock_device_reset(ctrl_info);
9461         pqi_ctrl_unblock_requests(ctrl_info);
9462         pqi_scsi_unblock_requests(ctrl_info);
9463         pqi_ctrl_unblock_scan(ctrl_info);
9464
9465         ssleep(PQI_POST_RESET_DELAY_SECS);
9466
9467         return pqi_ctrl_init_resume(ctrl_info);
9468 }
9469
9470 static int pqi_freeze(struct device *dev)
9471 {
9472         return pqi_suspend_or_freeze(dev, false);
9473 }
9474
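/*
 * Undo pqi_freeze(): reclaim the IRQs, mark the controller online and in
 * PQI mode again, and unblock all activity.
 */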
9475 static int pqi_thaw(struct device *dev)
9476 {
9477         int rc;
9478         struct pci_dev *pci_dev;
9479         struct pqi_ctrl_info *ctrl_info;
9480
9481         pci_dev = to_pci_dev(dev);
9482         ctrl_info = pci_get_drvdata(pci_dev);
9483
9484         rc = pqi_request_irqs(ctrl_info);
9485         if (rc)
9486                 return rc;
9487
9488         ctrl_info->controller_online = true;
9489         ctrl_info->pqi_mode_enabled = true;
9490
9491         pqi_ctrl_unblock_device_reset(ctrl_info);
9492         pqi_ctrl_unblock_requests(ctrl_info);
9493         pqi_scsi_unblock_requests(ctrl_info);
9494         pqi_ctrl_unblock_scan(ctrl_info);
9495
9496         return 0;
9497 }
9498
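/* Flush the controller cache as the final step before power-off. */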
9499 static int pqi_poweroff(struct device *dev)
9500 {
9501         struct pci_dev *pci_dev;
9502         struct pqi_ctrl_info *ctrl_info;
9503         enum bmic_flush_cache_shutdown_event shutdown_event;
9504
9505         pci_dev = to_pci_dev(dev);
9506         ctrl_info = pci_get_drvdata(pci_dev);
9507
9508         shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
9509         pqi_flush_cache(ctrl_info, shutdown_event);
9510
9511         return 0;
9512 }
9513
9514 static const struct dev_pm_ops pqi_pm_ops = {
9515         .suspend = pqi_suspend,
9516         .resume = pqi_resume_or_restore,
9517         .freeze = pqi_freeze,
9518         .thaw = pqi_thaw,
9519         .poweroff = pqi_poweroff,
9520         .restore = pqi_resume_or_restore,
9521 };
9522
9523 #endif /* CONFIG_PM */
9524
9525 /* Define the PCI IDs for the controllers that we support. */
9526 static const struct pci_device_id pqi_pci_id_table[] = {
9527         {
9528                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9529                                0x105b, 0x1211)
9530         },
9531         {
9532                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9533                                0x105b, 0x1321)
9534         },
9535         {
9536                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9537                                0x152d, 0x8a22)
9538         },
9539         {
9540                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9541                                0x152d, 0x8a23)
9542         },
9543         {
9544                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9545                                0x152d, 0x8a24)
9546         },
9547         {
9548                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9549                                0x152d, 0x8a36)
9550         },
9551         {
9552                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9553                                0x152d, 0x8a37)
9554         },
9555         {
9556                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9557                                0x193d, 0x0462)
9558         },
9559         {
9560                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9561                                0x193d, 0x1104)
9562         },
9563         {
9564                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9565                                0x193d, 0x1105)
9566         },
9567         {
9568                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9569                                0x193d, 0x1106)
9570         },
9571         {
9572                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9573                                0x193d, 0x1107)
9574         },
9575         {
9576                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9577                                0x193d, 0x1108)
9578         },
9579         {
9580                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9581                                0x193d, 0x1109)
9582         },
9583         {
9584                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9585                                0x193d, 0x110b)
9586         },
9587         {
9588                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9589                                0x193d, 0x1110)
9590         },
9591         {
9592                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9593                                0x193d, 0x8460)
9594         },
9595         {
9596                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9597                                0x193d, 0x8461)
9598         },
9599         {
9600                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9601                                0x193d, 0x8462)
9602         },
9603         {
9604                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9605                                0x193d, 0xc460)
9606         },
9607         {
9608                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9609                                0x193d, 0xc461)
9610         },
9611         {
9612                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9613                                0x193d, 0xf460)
9614         },
9615         {
9616                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9617                                0x193d, 0xf461)
9618         },
9619         {
9620                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9621                                0x1bd4, 0x0045)
9622         },
9623         {
9624                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9625                                0x1bd4, 0x0046)
9626         },
9627         {
9628                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9629                                0x1bd4, 0x0047)
9630         },
9631         {
9632                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9633                                0x1bd4, 0x0048)
9634         },
9635         {
9636                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9637                                0x1bd4, 0x004a)
9638         },
9639         {
9640                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9641                                0x1bd4, 0x004b)
9642         },
9643         {
9644                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9645                                0x1bd4, 0x004c)
9646         },
9647         {
9648                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9649                                0x1bd4, 0x004f)
9650         },
9651         {
9652                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9653                                0x1bd4, 0x0051)
9654         },
9655         {
9656                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9657                                0x1bd4, 0x0052)
9658         },
9659         {
9660                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9661                                0x1bd4, 0x0053)
9662         },
9663         {
9664                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9665                                0x1bd4, 0x0054)
9666         },
9667         {
9668                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9669                                0x1bd4, 0x006b)
9670         },
9671         {
9672                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9673                                0x1bd4, 0x006c)
9674         },
9675         {
9676                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9677                                0x1bd4, 0x006d)
9678         },
9679         {
9680                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9681                                0x1bd4, 0x006f)
9682         },
9683         {
9684                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9685                                0x1bd4, 0x0070)
9686         },
9687         {
9688                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9689                                0x1bd4, 0x0071)
9690         },
9691         {
9692                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9693                                0x1bd4, 0x0072)
9694         },
9695         {
9696                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9697                                0x1bd4, 0x0086)
9698         },
9699         {
9700                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9701                                0x1bd4, 0x0087)
9702         },
9703         {
9704                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9705                                0x1bd4, 0x0088)
9706         },
9707         {
9708                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9709                                0x1bd4, 0x0089)
9710         },
9711         {
9712                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9713                                0x1ff9, 0x00a1)
9714         },
9715         {
9716                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9717                                0x1f3a, 0x0104)
9718         },
9719         {
9720                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9721                                0x19e5, 0xd227)
9722         },
9723         {
9724                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9725                                0x19e5, 0xd228)
9726         },
9727         {
9728                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9729                                0x19e5, 0xd229)
9730         },
9731         {
9732                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9733                                0x19e5, 0xd22a)
9734         },
9735         {
9736                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9737                                0x19e5, 0xd22b)
9738         },
9739         {
9740                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9741                                0x19e5, 0xd22c)
9742         },
9743         {
9744                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9745                                PCI_VENDOR_ID_ADAPTEC2, 0x0110)
9746         },
9747         {
9748                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9749                                PCI_VENDOR_ID_ADAPTEC2, 0x0608)
9750         },
9751         {
9752                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9753                                PCI_VENDOR_ID_ADAPTEC2, 0x0659)
9754         },
9755         {
9756                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9757                                PCI_VENDOR_ID_ADAPTEC2, 0x0800)
9758         },
9759         {
9760                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9761                                PCI_VENDOR_ID_ADAPTEC2, 0x0801)
9762         },
9763         {
9764                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9765                                PCI_VENDOR_ID_ADAPTEC2, 0x0802)
9766         },
9767         {
9768                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9769                                PCI_VENDOR_ID_ADAPTEC2, 0x0803)
9770         },
9771         {
9772                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9773                                PCI_VENDOR_ID_ADAPTEC2, 0x0804)
9774         },
9775         {
9776                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9777                                PCI_VENDOR_ID_ADAPTEC2, 0x0805)
9778         },
9779         {
9780                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9781                                PCI_VENDOR_ID_ADAPTEC2, 0x0806)
9782         },
9783         {
9784                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9785                                PCI_VENDOR_ID_ADAPTEC2, 0x0807)
9786         },
9787         {
9788                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9789                                PCI_VENDOR_ID_ADAPTEC2, 0x0808)
9790         },
9791         {
9792                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9793                                PCI_VENDOR_ID_ADAPTEC2, 0x0809)
9794         },
9795         {
9796                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9797                                PCI_VENDOR_ID_ADAPTEC2, 0x080a)
9798         },
9799         {
9800                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9801                                PCI_VENDOR_ID_ADAPTEC2, 0x0900)
9802         },
9803         {
9804                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9805                                PCI_VENDOR_ID_ADAPTEC2, 0x0901)
9806         },
9807         {
9808                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9809                                PCI_VENDOR_ID_ADAPTEC2, 0x0902)
9810         },
9811         {
9812                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9813                                PCI_VENDOR_ID_ADAPTEC2, 0x0903)
9814         },
9815         {
9816                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9817                                PCI_VENDOR_ID_ADAPTEC2, 0x0904)
9818         },
9819         {
9820                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9821                                PCI_VENDOR_ID_ADAPTEC2, 0x0905)
9822         },
9823         {
9824                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9825                                PCI_VENDOR_ID_ADAPTEC2, 0x0906)
9826         },
9827         {
9828                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9829                                PCI_VENDOR_ID_ADAPTEC2, 0x0907)
9830         },
9831         {
9832                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9833                                PCI_VENDOR_ID_ADAPTEC2, 0x0908)
9834         },
9835         {
9836                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9837                                PCI_VENDOR_ID_ADAPTEC2, 0x090a)
9838         },
9839         {
9840                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9841                                PCI_VENDOR_ID_ADAPTEC2, 0x1200)
9842         },
9843         {
9844                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9845                                PCI_VENDOR_ID_ADAPTEC2, 0x1201)
9846         },
9847         {
9848                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9849                                PCI_VENDOR_ID_ADAPTEC2, 0x1202)
9850         },
9851         {
9852                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9853                                PCI_VENDOR_ID_ADAPTEC2, 0x1280)
9854         },
9855         {
9856                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9857                                PCI_VENDOR_ID_ADAPTEC2, 0x1281)
9858         },
9859         {
9860                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9861                                PCI_VENDOR_ID_ADAPTEC2, 0x1282)
9862         },
9863         {
9864                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9865                                PCI_VENDOR_ID_ADAPTEC2, 0x1300)
9866         },
9867         {
9868                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9869                                PCI_VENDOR_ID_ADAPTEC2, 0x1301)
9870         },
9871         {
9872                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9873                                PCI_VENDOR_ID_ADAPTEC2, 0x1302)
9874         },
9875         {
9876                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9877                                PCI_VENDOR_ID_ADAPTEC2, 0x1303)
9878         },
9879         {
9880                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9881                                PCI_VENDOR_ID_ADAPTEC2, 0x1304)
9882         },
9883         {
9884                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9885                                PCI_VENDOR_ID_ADAPTEC2, 0x1380)
9886         },
9887         {
9888                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9889                                PCI_VENDOR_ID_ADAPTEC2, 0x1400)
9890         },
9891         {
9892                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9893                                PCI_VENDOR_ID_ADAPTEC2, 0x1402)
9894         },
9895         {
9896                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9897                                PCI_VENDOR_ID_ADAPTEC2, 0x1410)
9898         },
9899         {
9900                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9901                                PCI_VENDOR_ID_ADAPTEC2, 0x1411)
9902         },
9903         {
9904                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9905                                PCI_VENDOR_ID_ADAPTEC2, 0x1412)
9906         },
9907         {
9908                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9909                                PCI_VENDOR_ID_ADAPTEC2, 0x1420)
9910         },
9911         {
9912                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9913                                PCI_VENDOR_ID_ADAPTEC2, 0x1430)
9914         },
9915         {
9916                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9917                                PCI_VENDOR_ID_ADAPTEC2, 0x1440)
9918         },
9919         {
9920                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9921                                PCI_VENDOR_ID_ADAPTEC2, 0x1441)
9922         },
9923         {
9924                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9925                                PCI_VENDOR_ID_ADAPTEC2, 0x1450)
9926         },
9927         {
9928                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9929                                PCI_VENDOR_ID_ADAPTEC2, 0x1452)
9930         },
9931         {
9932                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9933                                PCI_VENDOR_ID_ADAPTEC2, 0x1460)
9934         },
9935         {
9936                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9937                                PCI_VENDOR_ID_ADAPTEC2, 0x1461)
9938         },
9939         {
9940                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9941                                PCI_VENDOR_ID_ADAPTEC2, 0x1462)
9942         },
9943         {
9944                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9945                                PCI_VENDOR_ID_ADAPTEC2, 0x1463)
9946         },
9947         {
9948                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9949                                PCI_VENDOR_ID_ADAPTEC2, 0x1470)
9950         },
9951         {
9952                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9953                                PCI_VENDOR_ID_ADAPTEC2, 0x1471)
9954         },
9955         {
9956                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9957                                PCI_VENDOR_ID_ADAPTEC2, 0x1472)
9958         },
9959         {
9960                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9961                                PCI_VENDOR_ID_ADAPTEC2, 0x1473)
9962         },
9963         {
9964                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9965                                PCI_VENDOR_ID_ADAPTEC2, 0x1474)
9966         },
9967         {
9968                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9969                                PCI_VENDOR_ID_ADAPTEC2, 0x1475)
9970         },
9971         {
9972                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9973                                PCI_VENDOR_ID_ADAPTEC2, 0x1480)
9974         },
9975         {
9976                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9977                                PCI_VENDOR_ID_ADAPTEC2, 0x1490)
9978         },
9979         {
9980                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9981                                PCI_VENDOR_ID_ADAPTEC2, 0x1491)
9982         },
9983         {
9984                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9985                                PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
9986         },
9987         {
9988                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9989                                PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
9990         },
9991         {
9992                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9993                                PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
9994         },
9995         {
9996                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
9997                                PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
9998         },
9999         {
10000                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10001                                PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
10002         },
10003         {
10004                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10005                                PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
10006         },
10007         {
10008                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10009                                PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
10010         },
10011         {
10012                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10013                                PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
10014         },
10015         {
10016                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10017                                PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
10018         },
10019         {
10020                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10021                                PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
10022         },
10023         {
10024                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10025                                PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
10026         },
10027         {
10028                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10029                                PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
10030         },
10031         {
10032                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10033                                PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
10034         },
10035         {
10036                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10037                                PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
10038         },
10039         {
10040                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10041                                PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
10042         },
10043         {
10044                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10045                                PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
10046         },
10047         {
10048                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10049                                PCI_VENDOR_ID_ADVANTECH, 0x8312)
10050         },
10051         {
10052                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10053                                PCI_VENDOR_ID_DELL, 0x1fe0)
10054         },
10055         {
10056                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10057                                PCI_VENDOR_ID_HP, 0x0600)
10058         },
10059         {
10060                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10061                                PCI_VENDOR_ID_HP, 0x0601)
10062         },
10063         {
10064                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10065                                PCI_VENDOR_ID_HP, 0x0602)
10066         },
10067         {
10068                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10069                                PCI_VENDOR_ID_HP, 0x0603)
10070         },
10071         {
10072                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10073                                PCI_VENDOR_ID_HP, 0x0609)
10074         },
10075         {
10076                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10077                                PCI_VENDOR_ID_HP, 0x0650)
10078         },
10079         {
10080                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10081                                PCI_VENDOR_ID_HP, 0x0651)
10082         },
10083         {
10084                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10085                                PCI_VENDOR_ID_HP, 0x0652)
10086         },
10087         {
10088                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10089                                PCI_VENDOR_ID_HP, 0x0653)
10090         },
10091         {
10092                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10093                                PCI_VENDOR_ID_HP, 0x0654)
10094         },
10095         {
10096                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10097                                PCI_VENDOR_ID_HP, 0x0655)
10098         },
10099         {
10100                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10101                                PCI_VENDOR_ID_HP, 0x0700)
10102         },
10103         {
10104                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10105                                PCI_VENDOR_ID_HP, 0x0701)
10106         },
10107         {
10108                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10109                                PCI_VENDOR_ID_HP, 0x1001)
10110         },
10111         {
10112                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10113                                PCI_VENDOR_ID_HP, 0x1002)
10114         },
10115         {
10116                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10117                                PCI_VENDOR_ID_HP, 0x1100)
10118         },
10119         {
10120                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10121                                PCI_VENDOR_ID_HP, 0x1101)
10122         },
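        /* HPE (subsystem vendor ID 0x1590) OEM controllers */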
10123         {
10124                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10125                                0x1590, 0x0294)
10126         },
10127         {
10128                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10129                                0x1590, 0x02db)
10130         },
10131         {
10132                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10133                                0x1590, 0x02dc)
10134         },
10135         {
10136                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10137                                0x1590, 0x032e)
10138         },
10139         {
10140                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10141                                0x1590, 0x036f)
10142         },
10143         {
10144                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10145                                0x1590, 0x0381)
10146         },
10147         {
10148                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10149                                0x1590, 0x0382)
10150         },
10151         {
10152                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10153                                0x1590, 0x0383)
10154         },
10155         {
10156                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10157                                0x1d8d, 0x0800)
10158         },
10159         {
10160                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10161                                0x1d8d, 0x0908)
10162         },
10163         {
10164                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10165                                0x1d8d, 0x0806)
10166         },
10167         {
10168                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10169                                0x1d8d, 0x0916)
10170         },
10171         {
10172                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10173                                PCI_VENDOR_ID_GIGABYTE, 0x1000)
10174         },
10175         {
10176                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10177                                0x1dfc, 0x3161)
10178         },
10179         {
10180                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10181                                0x1f0c, 0x3161)
10182         },
10183         {
10184                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10185                                0x1cf2, 0x0804)
10186         },
10187         {
10188                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10189                                0x1cf2, 0x0805)
10190         },
10191         {
10192                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10193                                0x1cf2, 0x0806)
10194         },
10195         {
10196                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10197                                0x1cf2, 0x5445)
10198         },
10199         {
10200                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10201                                0x1cf2, 0x5446)
10202         },
10203         {
10204                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10205                                0x1cf2, 0x5447)
10206         },
10207         {
10208                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10209                                0x1cf2, 0x5449)
10210         },
10211         {
10212                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10213                                0x1cf2, 0x544a)
10214         },
10215         {
10216                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10217                                0x1cf2, 0x544b)
10218         },
10219         {
10220                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10221                                0x1cf2, 0x544d)
10222         },
10223         {
10224                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10225                                0x1cf2, 0x544e)
10226         },
10227         {
10228                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10229                                0x1cf2, 0x544f)
10230         },
10231         {
10232                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10233                                0x1cf2, 0x54da)
10234         },
10235         {
10236                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10237                                0x1cf2, 0x54db)
10238         },
10239         {
10240                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10241                                0x1cf2, 0x54dc)
10242         },
10243         {
10244                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10245                                0x1cf2, 0x0b27)
10246         },
10247         {
10248                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10249                                0x1cf2, 0x0b29)
10250         },
10251         {
10252                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10253                                0x1cf2, 0x0b45)
10254         },
10255         {
10256                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10257                                0x1cc4, 0x0101)
10258         },
10259         {
10260                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10261                                0x1cc4, 0x0201)
10262         },
10263         {
10264                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10265                                PCI_VENDOR_ID_LENOVO, 0x0220)
10266         },
10267         {
10268                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10269                                PCI_VENDOR_ID_LENOVO, 0x0221)
10270         },
10271         {
10272                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10273                                PCI_VENDOR_ID_LENOVO, 0x0520)
10274         },
10275         {
10276                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10277                                PCI_VENDOR_ID_LENOVO, 0x0522)
10278         },
10279         {
10280                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10281                                PCI_VENDOR_ID_LENOVO, 0x0620)
10282         },
10283         {
10284                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10285                                PCI_VENDOR_ID_LENOVO, 0x0621)
10286         },
10287         {
10288                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10289                                PCI_VENDOR_ID_LENOVO, 0x0622)
10290         },
10291         {
10292                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10293                                PCI_VENDOR_ID_LENOVO, 0x0623)
10294         },
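        /* IBM (subsystem vendor ID 0x1014) */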
10295         {
10296                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10297                                0x1014, 0x0718)
10298         },
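        /* Cisco (subsystem vendor ID 0x1137) */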
10299         {
10300                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10301                                0x1137, 0x02f8)
10302         },
10303         {
10304                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10305                                0x1137, 0x02f9)
10306         },
10307         {
10308                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10309                                0x1137, 0x02fa)
10310         },
10311         {
10312                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10313                                0x1137, 0x02fe)
10314         },
10315         {
10316                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10317                                0x1137, 0x02ff)
10318         },
10319         {
10320                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10321                                0x1137, 0x0300)
10322         },
10323         {
10324                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10325                                0x1ff9, 0x0045)
10326         },
10327         {
10328                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10329                                0x1ff9, 0x0046)
10330         },
10331         {
10332                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10333                                0x1ff9, 0x0047)
10334         },
10335         {
10336                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10337                                0x1ff9, 0x0048)
10338         },
10339         {
10340                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10341                                0x1ff9, 0x004a)
10342         },
10343         {
10344                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10345                                0x1ff9, 0x004b)
10346         },
10347         {
10348                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10349                                0x1ff9, 0x004c)
10350         },
10351         {
10352                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10353                                0x1ff9, 0x004f)
10354         },
10355         {
10356                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10357                                0x1ff9, 0x0051)
10358         },
10359         {
10360                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10361                                0x1ff9, 0x0052)
10362         },
10363         {
10364                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10365                                0x1ff9, 0x0053)
10366         },
10367         {
10368                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10369                                0x1ff9, 0x0054)
10370         },
10371         {
10372                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10373                                0x1ff9, 0x006b)
10374         },
10375         {
10376                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10377                                0x1ff9, 0x006c)
10378         },
10379         {
10380                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10381                                0x1ff9, 0x006d)
10382         },
10383         {
10384                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10385                                0x1ff9, 0x006f)
10386         },
10387         {
10388                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10389                                0x1ff9, 0x0070)
10390         },
10391         {
10392                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10393                                0x1ff9, 0x0071)
10394         },
10395         {
10396                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10397                                0x1ff9, 0x0072)
10398         },
10399         {
10400                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10401                                0x1ff9, 0x0086)
10402         },
10403         {
10404                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10405                                0x1ff9, 0x0087)
10406         },
10407         {
10408                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10409                                0x1ff9, 0x0088)
10410         },
10411         {
10412                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10413                                0x1ff9, 0x0089)
10414         },
10415         {
10416                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10417                                0x1e93, 0x1000)
10418         },
10419         {
10420                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10421                                0x1e93, 0x1001)
10422         },
10423         {
10424                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10425                                0x1e93, 0x1002)
10426         },
10427         {
10428                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10429                                0x1e93, 0x1005)
10430         },
10431         {
10432                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10433                                0x1f51, 0x1001)
10434         },
10435         {
10436                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10437                                0x1f51, 0x1002)
10438         },
10439         {
10440                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10441                                0x1f51, 0x1003)
10442         },
10443         {
10444                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10445                                0x1f51, 0x1004)
10446         },
10447         {
10448                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10449                                0x1f51, 0x1005)
10450         },
10451         {
10452                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10453                                0x1f51, 0x1006)
10454         },
10455         {
10456                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10457                                0x1f51, 0x1007)
10458         },
10459         {
10460                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10461                                0x1f51, 0x1008)
10462         },
10463         {
10464                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10465                                0x1f51, 0x1009)
10466         },
10467         {
10468                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10469                                0x1f51, 0x100a)
10470         },
10471         {
10472                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10473                                0x1f51, 0x100e)
10474         },
10475         {
10476                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10477                                0x1f51, 0x100f)
10478         },
10479         {
10480                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10481                                0x1f51, 0x1010)
10482         },
10483         {
10484                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10485                                0x1f51, 0x1011)
10486         },
10487         {
10488                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10489                                0x1f51, 0x1043)
10490         },
10491         {
10492                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10493                                0x1f51, 0x1044)
10494         },
10495         {
10496                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10497                                0x1f51, 0x1045)
10498         },
10499         {
10500                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10501                                0x1ff9, 0x00a3)
10502         },
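        /*
         * Catch-all: claim any function with device ID 0x028f, whatever its
         * subsystem IDs, so controllers not listed explicitly above still bind.
         */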
10503         {
10504                 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
10505                                PCI_ANY_ID, PCI_ANY_ID)
10506         },
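        /* terminating zero entry marks the end of the table */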
10507         { 0 }
10508 };
10509
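/* export the ID table so udev/kmod can autoload the module via modalias matching */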
10510 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
10511
10512 static struct pci_driver pqi_pci_driver = {
10513         .name = DRIVER_NAME_SHORT,
10514         .id_table = pqi_pci_id_table,
10515         .probe = pqi_pci_probe,
10516         .remove = pqi_pci_remove,
10517         .shutdown = pqi_shutdown,
10518 #if defined(CONFIG_PM)
10519         .driver = {
10520                 .pm = &pqi_pm_ops
10521         },
10522 #endif
10523 };
10524
10525 static int __init pqi_init(void)
10526 {
10527         int rc;
10528
10529         pr_info(DRIVER_NAME "\n");
10530         pqi_verify_structures();
10531         sis_verify_structures();
10532
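        /*
         * Attach to the SAS transport class before registering the PCI
         * driver; controller probing needs the transport template, so a
         * failure here aborts the module load.
         */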
10533         pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
10534         if (!pqi_sas_transport_template)
10535                 return -ENODEV;
10536
10537         pqi_process_module_params();
10538
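        /* if PCI registration fails, drop the transport reference taken above */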
10539         rc = pci_register_driver(&pqi_pci_driver);
10540         if (rc)
10541                 sas_release_transport(pqi_sas_transport_template);
10542
10543         return rc;
10544 }
10545
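/* module unload: tear everything down in the reverse order of pqi_init() */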
10546 static void __exit pqi_cleanup(void)
10547 {
10548         pci_unregister_driver(&pqi_pci_driver);
10549         sas_release_transport(pqi_sas_transport_template);
10550 }
10551
10552 module_init(pqi_init);
10553 module_exit(pqi_cleanup);
10554
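/*
 * Compile-time layout checks: each BUILD_BUG_ON() below pins the offset or
 * size of a field in the PQI/SIS/BMIC interface structures, so an accidental
 * layout change breaks the build instead of silently corrupting the
 * host/controller protocol.
 */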
10555 static void pqi_verify_structures(void)
10556 {
10557         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10558                 sis_host_to_ctrl_doorbell) != 0x20);
10559         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10560                 sis_interrupt_mask) != 0x34);
10561         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10562                 sis_ctrl_to_host_doorbell) != 0x9c);
10563         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10564                 sis_ctrl_to_host_doorbell_clear) != 0xa0);
10565         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10566                 sis_driver_scratch) != 0xb0);
10567         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10568                 sis_product_identifier) != 0xb4);
10569         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10570                 sis_firmware_status) != 0xbc);
10571         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10572                 sis_ctrl_shutdown_reason_code) != 0xcc);
10573         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10574                 sis_mailbox) != 0x1000);
10575         BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
10576                 pqi_registers) != 0x4000);
10577
10578         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10579                 iu_type) != 0x0);
10580         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10581                 iu_length) != 0x2);
10582         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10583                 response_queue_id) != 0x4);
10584         BUILD_BUG_ON(offsetof(struct pqi_iu_header,
10585                 driver_flags) != 0x6);
10586         BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
10587
10588         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10589                 status) != 0x0);
10590         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10591                 service_response) != 0x1);
10592         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10593                 data_present) != 0x2);
10594         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10595                 reserved) != 0x3);
10596         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10597                 residual_count) != 0x4);
10598         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10599                 data_length) != 0x8);
10600         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10601                 reserved1) != 0xa);
10602         BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
10603                 data) != 0xc);
10604         BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
10605
10606         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10607                 data_in_result) != 0x0);
10608         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10609                 data_out_result) != 0x1);
10610         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10611                 reserved) != 0x2);
10612         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10613                 status) != 0x5);
10614         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10615                 status_qualifier) != 0x6);
10616         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10617                 sense_data_length) != 0x8);
10618         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10619                 response_data_length) != 0xa);
10620         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10621                 data_in_transferred) != 0xc);
10622         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10623                 data_out_transferred) != 0x10);
10624         BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
10625                 data) != 0x14);
10626         BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
10627
10628         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10629                 signature) != 0x0);
10630         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10631                 function_and_status_code) != 0x8);
10632         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10633                 max_admin_iq_elements) != 0x10);
10634         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10635                 max_admin_oq_elements) != 0x11);
10636         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10637                 admin_iq_element_length) != 0x12);
10638         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10639                 admin_oq_element_length) != 0x13);
10640         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10641                 max_reset_timeout) != 0x14);
10642         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10643                 legacy_intx_status) != 0x18);
10644         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10645                 legacy_intx_mask_set) != 0x1c);
10646         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10647                 legacy_intx_mask_clear) != 0x20);
10648         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10649                 device_status) != 0x40);
10650         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10651                 admin_iq_pi_offset) != 0x48);
10652         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10653                 admin_oq_ci_offset) != 0x50);
10654         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10655                 admin_iq_element_array_addr) != 0x58);
10656         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10657                 admin_oq_element_array_addr) != 0x60);
10658         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10659                 admin_iq_ci_addr) != 0x68);
10660         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10661                 admin_oq_pi_addr) != 0x70);
10662         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10663                 admin_iq_num_elements) != 0x78);
10664         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10665                 admin_oq_num_elements) != 0x79);
10666         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10667                 admin_queue_int_msg_num) != 0x7a);
10668         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10669                 device_error) != 0x80);
10670         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10671                 error_details) != 0x88);
10672         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10673                 device_reset) != 0x90);
10674         BUILD_BUG_ON(offsetof(struct pqi_device_registers,
10675                 power_action) != 0x94);
10676         BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
10677
10678         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10679                 header.iu_type) != 0);
10680         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10681                 header.iu_length) != 2);
10682         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10683                 header.driver_flags) != 6);
10684         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10685                 request_id) != 8);
10686         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10687                 function_code) != 10);
10688         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10689                 data.report_device_capability.buffer_length) != 44);
10690         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10691                 data.report_device_capability.sg_descriptor) != 48);
10692         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10693                 data.create_operational_iq.queue_id) != 12);
10694         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10695                 data.create_operational_iq.element_array_addr) != 16);
10696         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10697                 data.create_operational_iq.ci_addr) != 24);
10698         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10699                 data.create_operational_iq.num_elements) != 32);
10700         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10701                 data.create_operational_iq.element_length) != 34);
10702         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10703                 data.create_operational_iq.queue_protocol) != 36);
10704         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10705                 data.create_operational_oq.queue_id) != 12);
10706         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10707                 data.create_operational_oq.element_array_addr) != 16);
10708         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10709                 data.create_operational_oq.pi_addr) != 24);
10710         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10711                 data.create_operational_oq.num_elements) != 32);
10712         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10713                 data.create_operational_oq.element_length) != 34);
10714         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10715                 data.create_operational_oq.queue_protocol) != 36);
10716         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10717                 data.create_operational_oq.int_msg_num) != 40);
10718         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10719                 data.create_operational_oq.coalescing_count) != 42);
10720         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10721                 data.create_operational_oq.min_coalescing_time) != 44);
10722         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10723                 data.create_operational_oq.max_coalescing_time) != 48);
10724         BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
10725                 data.delete_operational_queue.queue_id) != 12);
10726         BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
10727         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10728                 data.create_operational_iq) != 64 - 11);
10729         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10730                 data.create_operational_oq) != 64 - 11);
10731         BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
10732                 data.delete_operational_queue) != 64 - 11);
10733
10734         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10735                 header.iu_type) != 0);
10736         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10737                 header.iu_length) != 2);
10738         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10739                 header.driver_flags) != 6);
10740         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10741                 request_id) != 8);
10742         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10743                 function_code) != 10);
10744         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10745                 status) != 11);
10746         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10747                 data.create_operational_iq.status_descriptor) != 12);
10748         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10749                 data.create_operational_iq.iq_pi_offset) != 16);
10750         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10751                 data.create_operational_oq.status_descriptor) != 12);
10752         BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
10753                 data.create_operational_oq.oq_ci_offset) != 16);
10754         BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
10755
10756         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10757                 header.iu_type) != 0);
10758         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10759                 header.iu_length) != 2);
10760         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10761                 header.response_queue_id) != 4);
10762         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10763                 header.driver_flags) != 6);
10764         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10765                 request_id) != 8);
10766         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10767                 nexus_id) != 10);
10768         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10769                 buffer_length) != 12);
10770         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10771                 lun_number) != 16);
10772         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10773                 protocol_specific) != 24);
10774         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10775                 error_index) != 27);
10776         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10777                 cdb) != 32);
10778         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10779                 timeout) != 60);
10780         BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
10781                 sg_descriptors) != 64);
10782         BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
10783                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10784
10785         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10786                 header.iu_type) != 0);
10787         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10788                 header.iu_length) != 2);
10789         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10790                 header.response_queue_id) != 4);
10791         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10792                 header.driver_flags) != 6);
10793         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10794                 request_id) != 8);
10795         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10796                 nexus_id) != 12);
10797         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10798                 buffer_length) != 16);
10799         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10800                 data_encryption_key_index) != 22);
10801         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10802                 encrypt_tweak_lower) != 24);
10803         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10804                 encrypt_tweak_upper) != 28);
10805         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10806                 cdb) != 32);
10807         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10808                 error_index) != 48);
10809         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10810                 num_sg_descriptors) != 50);
10811         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10812                 cdb_length) != 51);
10813         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10814                 lun_number) != 52);
10815         BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
10816                 sg_descriptors) != 64);
10817         BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
10818                 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
10819
10820         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10821                 header.iu_type) != 0);
10822         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10823                 header.iu_length) != 2);
10824         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10825                 request_id) != 8);
10826         BUILD_BUG_ON(offsetof(struct pqi_io_response,
10827                 error_index) != 10);
10828
10829         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10830                 header.iu_type) != 0);
10831         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10832                 header.iu_length) != 2);
10833         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10834                 header.response_queue_id) != 4);
10835         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10836                 request_id) != 8);
10837         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10838                 data.report_event_configuration.buffer_length) != 12);
10839         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10840                 data.report_event_configuration.sg_descriptors) != 16);
10841         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10842                 data.set_event_configuration.global_event_oq_id) != 10);
10843         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10844                 data.set_event_configuration.buffer_length) != 12);
10845         BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
10846                 data.set_event_configuration.sg_descriptors) != 16);
10847
10848         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10849                 max_inbound_iu_length) != 6);
10850         BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
10851                 max_outbound_iu_length) != 14);
10852         BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
10853
10854         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10855                 data_length) != 0);
10856         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10857                 iq_arbitration_priority_support_bitmask) != 8);
10858         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10859                 maximum_aw_a) != 9);
10860         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10861                 maximum_aw_b) != 10);
10862         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10863                 maximum_aw_c) != 11);
10864         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10865                 max_inbound_queues) != 16);
10866         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10867                 max_elements_per_iq) != 18);
10868         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10869                 max_iq_element_length) != 24);
10870         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10871                 min_iq_element_length) != 26);
10872         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10873                 max_outbound_queues) != 30);
10874         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10875                 max_elements_per_oq) != 32);
10876         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10877                 intr_coalescing_time_granularity) != 34);
10878         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10879                 max_oq_element_length) != 36);
10880         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10881                 min_oq_element_length) != 38);
10882         BUILD_BUG_ON(offsetof(struct pqi_device_capability,
10883                 iu_layer_descriptors) != 64);
10884         BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
10885
10886         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10887                 event_type) != 0);
10888         BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
10889                 oq_id) != 2);
10890         BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
10891
10892         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10893                 num_event_descriptors) != 2);
10894         BUILD_BUG_ON(offsetof(struct pqi_event_config,
10895                 descriptors) != 4);
10896
10897         BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
10898                 ARRAY_SIZE(pqi_supported_event_types));
10899
10900         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10901                 header.iu_type) != 0);
10902         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10903                 header.iu_length) != 2);
10904         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10905                 event_type) != 8);
10906         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10907                 event_id) != 10);
10908         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10909                 additional_event_id) != 12);
10910         BUILD_BUG_ON(offsetof(struct pqi_event_response,
10911                 data) != 16);
10912         BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
10913
10914         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10915                 header.iu_type) != 0);
10916         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10917                 header.iu_length) != 2);
10918         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10919                 event_type) != 8);
10920         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10921                 event_id) != 10);
10922         BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
10923                 additional_event_id) != 12);
10924         BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
10925
10926         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10927                 header.iu_type) != 0);
10928         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10929                 header.iu_length) != 2);
10930         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10931                 request_id) != 8);
10932         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10933                 nexus_id) != 10);
10934         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10935                 timeout) != 14);
10936         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10937                 lun_number) != 16);
10938         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10939                 protocol_specific) != 24);
10940         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10941                 outbound_queue_id_to_manage) != 26);
10942         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10943                 request_id_to_manage) != 28);
10944         BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
10945                 task_management_function) != 30);
10946         BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
10947
10948         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10949                 header.iu_type) != 0);
10950         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10951                 header.iu_length) != 2);
10952         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10953                 request_id) != 8);
10954         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10955                 nexus_id) != 10);
10956         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10957                 additional_response_info) != 12);
10958         BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
10959                 response_code) != 15);
10960         BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
10961
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

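	/*
	 * BMIC IDENTIFY PHYSICAL DEVICE buffer: 2560 bytes in total, with
	 * the bay, device-type, multipath and queue-depth fields the driver
	 * consumes pinned to their firmware-defined offsets.
	 */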
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

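	/* BMIC sense-feature buffer header: 4 bytes, buffer_length at byte 2. */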
	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

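	/* BMIC sense-feature page header: 4 bytes, page_length at byte 2. */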
	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

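	/*
	 * BMIC sense-feature IO page, AIO subpage: carries the firmware's
	 * AIO read/write support flags plus the encrypted-transfer and
	 * RAID-write size limits that the RAID bypass path honors.
	 */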
	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

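	/*
	 * Queue sizing limits: the admin queue element counts are reported
	 * through 8-bit fields, and every element length must be a multiple
	 * of the PQI alignment granularity.  Note that 1048560 == 65535 * 16,
	 * i.e. the largest length expressible in 16-byte units in a 16-bit
	 * field.
	 */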
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

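	/*
	 * The slots reserved for internal use (events, synchronous requests,
	 * etc.) must leave at least one slot available for regular I/O, in
	 * both normal and kdump configurations.
	 */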
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
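
/*
 * The same technique works outside the kernel: C11 _Static_assert() gives
 * equivalent compile-time layout checks.  A minimal user-space sketch with
 * a hypothetical wire structure (not part of this driver):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	struct example_iu {
 *		uint8_t  iu_type;
 *		uint8_t  reserved;
 *		uint16_t iu_length;
 *		uint32_t pad;
 *		uint16_t request_id;
 *	};
 *
 *	_Static_assert(offsetof(struct example_iu, request_id) == 8,
 *		"request_id must sit at byte 8 of the IU");
 */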