// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.30-031"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		30
#define DRIVER_REVISION		31

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

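/*
 * pqi_lockup_action is consulted when the driver takes a controller offline:
 * "none" leaves the system running, "reboot" restarts it, and "panic"
 * crashes it so a dump can be taken (see the lockup_action module
 * parameter below).
 */
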
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1	/* also used for RAID 40 */
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

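/*
 * The SIS "driver scratch" register survives controller resets. The two
 * bits below let the driver remember whether the controller was left in
 * PQI or legacy SIS mode and whether firmware triage is supported.
 */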
#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

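/*
 * Block new requests at the SCSI midlayer, then poll scsi_host_busy()
 * until every command already handed to the driver has completed.
 */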
static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

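/*
 * ofa_mutex is held for the duration of an Online Firmware Activation, so
 * taking and immediately dropping it blocks the caller until any
 * in-progress OFA has finished.
 */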
static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
	return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

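/*
 * Map a single contiguous buffer for DMA and describe it with one SG
 * descriptor; CISS_SG_LAST marks it as the final element of the list.
 */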
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

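/*
 * Build a RAID-path (CISS/BMIC pass-through) request IU around the given
 * CDB opcode and map the data buffer with a single SG descriptor.
 */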
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
	struct pqi_io_request *io_request;
	u16 i;

	if (scmd) { /* SML I/O request */
		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		i = blk_mq_unique_tag_to_tag(blk_tag);
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) > 1) {
			atomic_dec(&io_request->refcount);
			return NULL;
		}
	} else { /* IOCTL or driver internal request */
		/*
		 * benignly racy - may have to wait for an open slot.
		 * command slot range is scsi_ml_can_queue -
		 *   [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
		 */
		i = 0;
		while (1) {
			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
			if (atomic_inc_return(&io_request->refcount) == 1)
				break;
			atomic_dec(&io_request->refcount);
			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
		}
	}

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

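/*
 * The two minimums above let the driver accept responses from firmware
 * that returns a shorter AIO subpage, as long as all fields actually used
 * (up through max_write_raid_1_10_3drive) are present.
 */
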
static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

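/*
 * Enable the PTRAID bit in the controller's diagnostic options with a
 * read-modify-write so that any other options already set are preserved.
 */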
static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8 start_tag[4];
	u8 driver_version_tag[2];
	__le16 driver_version_length;
	char driver_version[32];
	u8 dont_write_tag[2];
	u8 end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version));
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

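/*
 * The time payload below is BCD-encoded:
 * time[0..2] = hour/minute/second, time[3] = reserved,
 * time[4..5] = month/day, time[6..7] = century/two-digit year.
 */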
#pragma pack(1)

struct bmic_host_wellness_time {
	u8 start_tag[4];
	u8 time_tag[2];
	__le16 time_length;
	u8 time[8];
	u8 dont_write_tag[2];
	u8 end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

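/*
 * CISS_REPORT_PHYS can return either 8-byte or 16-byte WWID entries,
 * depending on the extended-format support negotiated with the firmware.
 * Normalize 8-byte entries into the 16-byte layout so the rest of the
 * driver only has to deal with one format.
 */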
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
		num_physicals), GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list
	 * by adding a list entry that is all zeros.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {
		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats);
	if (!device->raid_io_stats) {
		rc = -ENOMEM;
		goto error;
	}

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10
#define PQI_DEVICE_ERASE_IN_PROGRESS	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
	device->lun_count = id_phys->multi_lun_device_lun_count;
	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	device->ncq_prio_support =
		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
		PQI_DEVICE_NCQ_PRIO_SUPPORTED);

	device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 *       using the management utility.
 */
static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
	return device->erase_in_progress;
}

static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);

	if (rc == 0 && device->lun_count == 0)
		device->lun_count = 1;

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

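/*
 * Before tearing a device down, give outstanding commands up to 20 seconds
 * (PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS) per LUN to drain; if they
 * do not, log how many are still outstanding and remove the device anyway.
 */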
#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;
	int lun;

	for (lun = 0; lun < device->lun_count; lun++) {
		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
		if (rc)
			dev_err(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus,
				device->target, lun,
				atomic_read(&device->scsi_cmds_outstanding[lun]));
	}

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);

	pqi_device_remove_start(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0;

	return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx%016llx",
			get_unaligned_be64(&device->wwid[0]),
			get_unaligned_be64(&device->wwid[8]));

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
{
	u32 raid_map1_size;
	u32 raid_map2_size;

	if (raid_map1 == NULL || raid_map2 == NULL)
		return raid_map1 == raid_map2;

	raid_map1_size = get_unaligned_le32(&raid_map1->structure_size);
	raid_map2_size = get_unaligned_le32(&raid_map2->structure_size);

	if (raid_map1_size != raid_map2_size)
		return false;

	return memcmp(raid_map1, raid_map2, raid_map1_size) == 0;
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model, sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->device_offline = false;
	existing_device->lun_count = new_device->lun_count;

	if (pqi_is_logical_device(existing_device)) {
		existing_device->is_external_raid_device = new_device->is_external_raid_device;

		if (existing_device->devtype == TYPE_DISK) {
			existing_device->raid_level = new_device->raid_level;
			existing_device->volume_status = new_device->volume_status;
			memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group));
			if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) {
				kfree(existing_device->raid_map);
				existing_device->raid_map = new_device->raid_map;
				/* To prevent this from being freed later. */
				new_device->raid_map = NULL;
			}
			if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) {
				existing_device->raid_io_stats = new_device->raid_io_stats;
				new_device->raid_io_stats = NULL;
			}
			existing_device->raid_bypass_configured = new_device->raid_bypass_configured;
			existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled;
		}
	} else {
		existing_device->aio_enabled = new_device->aio_enabled;
		existing_device->aio_handle = new_device->aio_handle;
		existing_device->is_expander_smp_device = new_device->is_expander_smp_device;
		existing_device->active_path_index = new_device->active_path_index;
		existing_device->phy_id = new_device->phy_id;
		existing_device->path_map = new_device->path_map;
		existing_device->bay = new_device->bay;
		existing_device->box_index = new_device->box_index;
		existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
		existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type;
		memcpy(existing_device->box, new_device->box, sizeof(existing_device->box));
		memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector));
	}
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		free_percpu(device->raid_io_stats);
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
{
	unsigned int lun;
	struct pqi_tmf_work *tmf_work;

	for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++)
		INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker);
}
static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
{
	if (pqi_device_in_remove(device))
		return false;

	if (device->sdev == NULL)
		return false;

	if (!scsi_device_online(device->sdev))
		return false;

	return device->rescan;
}
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(ctrl_info, matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
		pqi_init_device_tmf_work(device);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/*
	 * If OFA is in progress and there are devices that need to be deleted,
	 * allow any pending reset operations to continue and unblock any SCSI
	 * requests before removal.
	 */
	if (pqi_ofa_in_progress(ctrl_info)) {
		list_for_each_entry_safe(device, next, &delete_list, delete_list_entry)
			if (pqi_is_device_added(device))
				pqi_device_remove_start(device);
		pqi_ctrl_unblock_device_reset(ctrl_info);
		pqi_scsi_unblock_requests(ctrl_info);
	}

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SML of any existing device changes such as:
	 * queue depth, device size.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		/*
		 * Check for queue depth change.
		 */
		if (device->sdev && device->queue_depth != device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
		}
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
		/*
		 * Check for changes in the device, such as size.
		 */
		if (pqi_volume_rescan_needed(device)) {
			device->rescan = false;
			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
			scsi_rescan_device(device->sdev);
		} else {
			spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}
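
/*
 * The reconciliation above is deliberately split into phases: under the
 * spinlock only list manipulation happens (mark all devices gone, clear
 * the mark on matches, queue removals on delete_list and additions on
 * add_list); everything that can block or sleep, such as
 * pqi_remove_device(), scsi_rescan_device(), and pqi_add_device(), runs
 * after the lock is dropped.
 */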
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller. If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
		!pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device || !pqi_skip_device(device->scsi3addr);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_16byte_wwid_list *physdev_list = NULL;
	struct report_log_lun_list *logdev_list = NULL;
	struct report_phys_lun_16byte_wwid *phys_lun;
	struct report_log_lun *log_lun;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below. We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun = &physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) {
					pqi_mask_device(phys_lun->lunid);
					break;
				}
			}
		}
	}

	if (num_logicals &&
		(logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX))
		ctrl_info->lv_drive_type_mix_valid = true;

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
					sizeof(*new_device_list),
					GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun = &physdev_list->lun_entries[physical_index++];
			log_lun = NULL;
			scsi3addr = phys_lun->lunid;
		} else {
			is_physical_device = false;
			phys_lun = NULL;
			log_lun = &logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			device->device_type = phys_lun->device_type;
			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		if (!pqi_is_supported_device(device))
			continue;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device, id_phys);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx%016llx\n",
					get_unaligned_be64(&phys_lun->wwid[0]),
					get_unaligned_be64(&phys_lun->wwid[8]));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		/* Do not present disks that the OS cannot fully probe. */
		if (pqi_keep_device_offline(device))
			continue;

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid));
			if ((phys_lun->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun->aio_handle;
			}
		} else {
			memcpy(device->volume_id, log_lun->volume_id,
				sizeof(device->volume_id));
		}

		device->sas_address = get_unaligned_be64(&device->wwid[0]);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	int mutex_acquired;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);

	if (!mutex_acquired) {
		if (pqi_ctrl_scan_blocked(ctrl_info))
			return -EBUSY;
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		return -EINPROGRESS;
	}

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc && !pqi_ctrl_scan_blocked(ctrl_info))
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);

	pqi_scan_scsi_devices(ctrl_info);
}
/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
	struct raid_map *raid_map, u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
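
/*
 * Worked example of the tweak calculation: on a volume formatted with
 * 4096-byte blocks, an I/O starting at LBA 100 gets
 * first_block = (100 * 4096) / 512 = 800, i.e. the tweak is always
 * expressed in 512-byte units regardless of the volume block size.
 */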
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	bool is_supported = true;

	switch (rmd->raid_level) {
	case SA_RAID_0:
		break;
	case SA_RAID_1:
		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
			rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
			is_supported = false;
		break;
	case SA_RAID_TRIPLE:
		if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
			rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
			is_supported = false;
		break;
	case SA_RAID_5:
		if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
			rmd->data_length > ctrl_info->max_write_raid_5_6))
			is_supported = false;
		break;
	case SA_RAID_6:
		if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
			rmd->data_length > ctrl_info->max_write_raid_5_6))
			is_supported = false;
		break;
	default:
		is_supported = false;
		break;
	}

	return is_supported;
}
#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		rmd->is_write = true;
		fallthrough;
	case READ_6:
		rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		rmd->block_cnt = (u32)scmd->cmnd[4];
		if (rmd->block_cnt == 0)
			rmd->block_cnt = 256;
		break;
	case WRITE_10:
		rmd->is_write = true;
		fallthrough;
	case READ_10:
		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		rmd->is_write = true;
		fallthrough;
	case READ_12:
		rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		rmd->is_write = true;
		fallthrough;
	case READ_16:
		rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
		rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);

	return 0;
}
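
/*
 * Example decode: a READ(10) CDB of 28 00 00 00 10 00 00 00 08 00 yields
 * first_block = 0x1000 (big-endian bytes 2-5) and block_cnt = 8
 * (big-endian bytes 7-8). The READ(6)/WRITE(6) special case follows SBC:
 * a transfer length of 0 means 256 blocks.
 */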
static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
{
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	rmd->last_block = rmd->first_block + rmd->block_cnt - 1;

	/* Check for invalid block or wraparound. */
	if (rmd->last_block >=
		get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		rmd->last_block < rmd->first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd->data_disks_per_row =
		get_unaligned_le16(&raid_map->data_disks_per_row);
	rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
	rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
		return PQI_RAID_BYPASS_INELIGIBLE;
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->first_row = tmpdiv;
	tmpdiv = rmd->last_block;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->last_row = tmpdiv;
	rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
	tmpdiv = rmd->first_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->first_column = tmpdiv;
	tmpdiv = rmd->last_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->last_column = tmpdiv;
#else
	rmd->first_row = rmd->first_block / rmd->blocks_per_row;
	rmd->last_row = rmd->last_block / rmd->blocks_per_row;
	rmd->first_row_offset = (u32)(rmd->first_block -
		(rmd->first_row * rmd->blocks_per_row));
	rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
		rmd->blocks_per_row));
	rmd->first_column = rmd->first_row_offset / rmd->strip_size;
	rmd->last_column = rmd->last_row_offset / rmd->strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (rmd->first_row != rmd->last_row ||
		rmd->first_column != rmd->last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	rmd->total_disks_per_row = rmd->data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	rmd->map_row = ((u32)(rmd->first_row >>
		raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
		rmd->first_column;

	return 0;
}
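
/*
 * Worked example of the stripe math: with 4 data disks per row and a
 * 128-block strip, blocks_per_row = 512. A request for blocks 520-527
 * lands in row 1, row offsets 8-15, column 0, so it touches one drive and
 * stays bypass-eligible; a request for blocks 120-136 would straddle
 * columns 0 and 1 and is handed back to the controller's RAID path.
 */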
static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
	struct raid_map *raid_map)
{
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* RAID 50/60 */
	/* Verify first and last block are in same RAID group. */
	rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	rmd->first_group = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->first_group;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->first_group = tmpdiv;
	tmpdiv = rmd->last_block;
	rmd->last_group = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->last_group;
	do_div(tmpdiv, rmd->blocks_per_row);
	rmd->last_group = tmpdiv;
#else
	rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
	rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
#endif
	if (rmd->first_group != rmd->last_group)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Verify request is in a single row of RAID 5/6. */
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	do_div(tmpdiv, rmd->stripesize);
	rmd->first_row = tmpdiv;
	rmd->r5or6_first_row = tmpdiv;
	tmpdiv = rmd->last_block;
	do_div(tmpdiv, rmd->stripesize);
	rmd->r5or6_last_row = tmpdiv;
#else
	rmd->first_row = rmd->r5or6_first_row =
		rmd->first_block / rmd->stripesize;
	rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
#endif
	if (rmd->r5or6_first_row != rmd->r5or6_last_row)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Verify request is in a single column. */
#if BITS_PER_LONG == 32
	tmpdiv = rmd->first_block;
	rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->first_row_offset;
	rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
	rmd->r5or6_first_row_offset = rmd->first_row_offset;
	tmpdiv = rmd->last_block;
	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
	tmpdiv = rmd->r5or6_last_row_offset;
	rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
	tmpdiv = rmd->r5or6_first_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->first_column = rmd->r5or6_first_column = tmpdiv;
	tmpdiv = rmd->r5or6_last_row_offset;
	do_div(tmpdiv, rmd->strip_size);
	rmd->r5or6_last_column = tmpdiv;
#else
	rmd->first_row_offset = rmd->r5or6_first_row_offset =
		(u32)((rmd->first_block % rmd->stripesize) %
		rmd->blocks_per_row);

	rmd->r5or6_last_row_offset =
		(u32)((rmd->last_block % rmd->stripesize) %
		rmd->blocks_per_row);

	rmd->first_column =
		rmd->r5or6_first_row_offset / rmd->strip_size;
	rmd->r5or6_first_column = rmd->first_column;
	rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
#endif
	if (rmd->r5or6_first_column != rmd->r5or6_last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Request is eligible. */
	rmd->map_row =
		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);

	rmd->map_index = (rmd->first_group *
		(get_unaligned_le16(&raid_map->row_cnt) *
		rmd->total_disks_per_row)) +
		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;

	if (rmd->is_write) {
		u32 index;

		/*
		 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
		 * parity entries inside the device's raid_map.
		 *
		 * A device's RAID map is bounded by: number of RAID disks squared.
		 *
		 * The device's RAID map size is checked during device
		 * initialization.
		 */
		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
		index *= rmd->total_disks_per_row;
		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);

		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
		if (rmd->raid_level == SA_RAID_6) {
			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
		}
#if BITS_PER_LONG == 32
		tmpdiv = rmd->first_block;
		do_div(tmpdiv, rmd->blocks_per_row);
		rmd->row = tmpdiv;
#else
		rmd->row = rmd->first_block / rmd->blocks_per_row;
#endif
	}

	return 0;
}
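
/*
 * Parity index math for the write case above: rounding map_index + 1 up
 * to the next multiple of total_disks_per_row gives the start of the next
 * map row; stepping back over the metadata entries lands index on the
 * first parity handle (P) of the current row, with Q immediately
 * following it for RAID 6.
 */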
static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
{
	/* Build the new CDB for the physical disk I/O. */
	if (rmd->disk_block > 0xffffffff) {
		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
		rmd->cdb[1] = 0;
		put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
		put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
		rmd->cdb[14] = 0;
		rmd->cdb[15] = 0;
		rmd->cdb_length = 16;
	} else {
		rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
		rmd->cdb[1] = 0;
		put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
		rmd->cdb[6] = 0;
		put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
		rmd->cdb[9] = 0;
		rmd->cdb_length = 10;
	}
}
static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
	struct pqi_scsi_dev_raid_map_data *rmd)
{
	u32 index;
	u32 group;

	group = rmd->map_index / rmd->data_disks_per_row;

	index = rmd->map_index - (group * rmd->data_disks_per_row);
	rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
	index += rmd->data_disks_per_row;
	rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
	if (rmd->layout_map_count > 2) {
		index += rmd->data_disks_per_row;
		rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
	}

	rmd->num_it_nexus_entries = rmd->layout_map_count;
}
static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	struct raid_map *raid_map;
	u32 group;
	u32 next_bypass_group;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
	struct pqi_scsi_dev_raid_map_data rmd = { 0 };

	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
	if (rc)
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd.raid_level = device->raid_level;

	if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(rmd.block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	raid_map = device->raid_map;

	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
	if (rc)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (device->raid_level == SA_RAID_1 ||
		device->raid_level == SA_RAID_TRIPLE) {
		if (rmd.is_write) {
			pqi_calc_aio_r1_nexus(raid_map, &rmd);
		} else {
			group = device->next_bypass_group[rmd.map_index];
			next_bypass_group = group + 1;
			if (next_bypass_group >= rmd.layout_map_count)
				next_bypass_group = 0;
			device->next_bypass_group[rmd.map_index] = next_bypass_group;
			rmd.map_index += group * rmd.data_disks_per_row;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		(rmd.layout_map_count > 1 || rmd.is_write)) {
		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
		if (rc)
			return PQI_RAID_BYPASS_INELIGIBLE;
	}

	if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
		return PQI_RAID_BYPASS_INELIGIBLE;

	rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
	rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		rmd.first_row * rmd.strip_size +
		(rmd.first_row_offset - rmd.first_column * rmd.strip_size);
	rmd.disk_block_cnt = rmd.block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		rmd.disk_block <<= raid_map->phys_blk_shift;
		rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(rmd.disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	pqi_set_aio_cdb(&rmd);

	if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) {
		if (rmd.data_length > device->max_transfer_encrypted)
			return PQI_RAID_BYPASS_INELIGIBLE;
		pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	if (rmd.is_write) {
		switch (device->raid_level) {
		case SA_RAID_1:
		case SA_RAID_TRIPLE:
			return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group,
				encryption_info_ptr, device, &rmd);
		case SA_RAID_5:
		case SA_RAID_6:
			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
				encryption_info_ptr, device, &rmd);
		}
	}

	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
		rmd.cdb, rmd.cdb_length, queue_group,
		encryption_info_ptr, true, false);
}
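
/*
 * To summarize the bypass path: the request must decode to a supported
 * opcode, respect the RAID level's write limits, and map to a single
 * row/column of a single physical drive; only then is it re-issued as a
 * physical disk CDB through the AIO path. Any ineligibility returns
 * PQI_RAID_BYPASS_INELIGIBLE and the I/O falls back to the controller's
 * normal RAID path, so bypass is strictly an optimization.
 */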
#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY	0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
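
/*
 * The three polling loops above perform the PQI-mode ready handshake in
 * sequence: the PQI signature must appear in the device registers, the
 * function and status code must return to IDLE, and the device state must
 * reach ALL_REGISTERS_READY, all against one shared timeout computed
 * before the first loop.
 */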
static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->raid_bypass_enabled = false;
	device->aio_enabled = false;
}
static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (device->device_offline)
		return;

	device->device_offline = true;
	ctrl_info = shost_to_hba(sdev->host);
	pqi_schedule_rescan_worker(ctrl_info);
	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
		path, ctrl_info->scsi_host->host_no, device->bus,
		device->target, device->lun);
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
	case PQI_DATA_IN_OUT_ABORTED:
		host_byte = DID_ABORT;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		host_byte = DID_TIME_OUT;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
	default:
		host_byte = DID_ERROR;
		break;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e) {
			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
			struct pqi_scsi_dev *device = scmd->device->hostdata;

			switch (sshdr.ascq) {
			case 0x1: /* LOGICAL UNIT FAILURE */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				pqi_take_device_offline(scmd->device, "RAID");
				host_byte = DID_NO_CONNECT;
				break;

			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				break;
			}
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	if (pqi_cmd_priv(scmd)->this_residual &&
		!pqi_is_logical_device(scmd->device->hostdata) &&
		scsi_status == SAM_STAT_CHECK_CONDITION &&
		host_byte == DID_OK &&
		sense_data_length &&
		scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) &&
		sshdr.sense_key == ILLEGAL_REQUEST &&
		sshdr.asc == 0x26 &&
		sshdr.ascq == 0x0) {
		host_byte = DID_NO_CONNECT;
		pqi_take_device_offline(scmd->device, "AIO");
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			if (!io_request->raid_bypass) {
				device_offline = true;
				pqi_take_device_offline(scmd->device, "AIO");
				host_byte = DID_NO_CONNECT;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}
static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	case SOP_TMF_REJECTED:
		rc = -EAGAIN;
		break;
	case SOP_TMF_INCORRECT_LOGICAL_UNIT:
		rc = -ENODEV;
		break;
	default:
		rc = -EIO;
		break;
	}

	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"Task Management Function error: %d (response code: %u)\n", rc, response->response_code);

	return rc;
}
static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason);
}
static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{
	int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE);
			dev_err(&ctrl_info->pci_dev->dev,
				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
			return -1;
		}
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		if (request_id >= ctrl_info->max_io_slots) {
			pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
			return -1;
		}

		io_request = &ctrl_info->io_request_pool[request_id];
		if (atomic_read(&io_request->refcount) == 0) {
			pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
				request_id, oq_pi, oq_ci);
			return -1;
		}

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
			if (io_request->scmd)
				io_request->scmd->result = 0;
			fallthrough;
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_VENDOR_GENERAL:
			io_request->status =
				get_unaligned_le16(
				&((struct pqi_vendor_general_response *)response)->status);
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status = pqi_interpret_task_management_response(ctrl_info,
				(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type, io_request);
			break;
		default:
			pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE);
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
				response->header.iu_type, oq_pi, oq_ci);
			return -1;
		}

		io_request->io_complete_callback(io_request, io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
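
/*
 * Example: in a 16-element ring with pi = 3 and ci = 10, the producer has
 * wrapped, so num_elements_used = 16 - 10 + 3 = 9 and 6 elements are
 * free. One slot is always sacrificed so that pi == ci unambiguously
 * means "empty" rather than "full".
 */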
static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (pqi_ctrl_offline(ctrl_info))
			return;
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}
static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	put_unaligned_le16(event->event_id, &request.event_id);
	put_unaligned_le32(event->additional_event_id, &request.additional_event_id);

	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1

static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
	struct pqi_ctrl_info *ctrl_info)
{
	u8 status;
	unsigned long timeout;

	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		status = pqi_read_soft_reset_status(ctrl_info);
		if (status & PQI_SOFT_RESET_INITIATE)
			return RESET_INITIATE_DRIVER;

		if (status & PQI_SOFT_RESET_ABORT)
			return RESET_ABORT;

		if (!sis_is_firmware_running(ctrl_info))
			return RESET_NORESPONSE;

		if (time_after(jiffies, timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"timed out waiting for soft reset status\n");
			return RESET_TIMEDOUT;
		}

		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
	}
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int delay_secs;
	enum pqi_soft_reset_status reset_status;

	if (ctrl_info->soft_reset_handshake_supported)
		reset_status = pqi_poll_for_soft_reset_status(ctrl_info);
	else
		reset_status = RESET_INITIATE_FIRMWARE;

	delay_secs = PQI_POST_RESET_DELAY_SECS;

	switch (reset_status) {
	case RESET_TIMEDOUT:
		delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS;
		fallthrough;
	case RESET_INITIATE_DRIVER:
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation: resetting controller\n");
		sis_soft_reset(ctrl_info);
		fallthrough;
	case RESET_INITIATE_FIRMWARE:
		ctrl_info->pqi_mode_enabled = false;
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs);
		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
		pqi_ctrl_ofa_done(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation: %s\n",
			rc == 0 ? "SUCCESS" : "FAILED");
		break;
	case RESET_ABORT:
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation ABORTED\n");
		if (ctrl_info->soft_reset_handshake_supported)
			pqi_clear_soft_reset_status(ctrl_info);
		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
		pqi_ctrl_ofa_done(ctrl_info);
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		break;
	case RESET_NORESPONSE:
		fallthrough;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unexpected Online Firmware Activation reset status: 0x%x\n",
			reset_status);
		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
		pqi_ctrl_ofa_done(ctrl_info);
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT);
		break;
	}
}
static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work);

	pqi_ctrl_ofa_start(ctrl_info);
	pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested);
	pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE);
}
static void pqi_ofa_quiesce_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work);

	event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)];

	pqi_ofa_ctrl_quiesce(ctrl_info);
	pqi_acknowledge_event(ctrl_info, event);
	pqi_process_soft_reset(ctrl_info);
}
static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	bool ack_event;

	ack_event = true;

	switch (event->event_id) {
	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation memory allocation request\n");
		schedule_work(&ctrl_info->ofa_memory_alloc_work);
		break;
	case PQI_EVENT_OFA_QUIESCE:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation quiesce request\n");
		schedule_work(&ctrl_info->ofa_quiesce_work);
		ack_event = false;
		break;
	case PQI_EVENT_OFA_CANCELED:
		dev_info(&ctrl_info->pci_dev->dev,
			"received Online Firmware Activation cancel request: reason: %u\n",
			ctrl_info->ofa_cancel_reason);
		pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory);
		pqi_ctrl_ofa_done(ctrl_info);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"received unknown Online Firmware Activation request: event ID: %u\n",
			event->event_id);
		break;
	}

	return ack_event;
}
static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
			device->rescan = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->raid_bypass_enabled)
			device->raid_bypass_enabled = false;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	bool rescan_needed;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;
	bool ack_event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pqi_ctrl_busy(ctrl_info);
	pqi_wait_if_ctrl_blocked(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		goto out;

	rescan_needed = false;
	event = ctrl_info->events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (event->pending) {
			event->pending = false;
			if (event->event_type == PQI_EVENT_TYPE_OFA) {
				ack_event = pqi_ofa_process_event(ctrl_info, event);
			} else {
				ack_event = true;
				rescan_needed = true;
				if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE)
					pqi_mark_volumes_for_rescan(ctrl_info);
				else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE)
					pqi_disable_raid_bypass(ctrl_info);
			}
			if (ack_event)
				pqi_acknowledge_event(ctrl_info, event);
		}
		event++;
	}

#define PQI_RESCAN_WORK_FOR_EVENT_DELAY		(5 * HZ)

	if (rescan_needed)
		pqi_schedule_rescan_worker_with_delay(ctrl_info,
			PQI_RESCAN_WORK_FOR_EVENT_DELAY);

out:
	pqi_ctrl_unbusy(ctrl_info);
}
#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
			dev_err(&ctrl_info->pci_dev->dev,
				"no heartbeat detected - last heartbeat count: %u\n",
				heartbeat_count);
			pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT);
			return;
		}
	} else {
		ctrl_info->previous_num_interrupts = num_interrupts;
	}

	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
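
/*
 * The handler above only declares the controller dead when both activity
 * signals stall: if interrupts are still arriving, the heartbeat counter
 * is not even consulted; if interrupts have stopped but the heartbeat
 * counter is still advancing, the controller is idle but alive and the
 * timer is simply re-armed.
 */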
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return;

	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);
	ctrl_info->previous_heartbeat_count =
		pqi_read_heartbeat_counter(ctrl_info);

	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	add_timer(&ctrl_info->heartbeat_timer);
}
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	del_timer_sync(&ctrl_info->heartbeat_timer);
}
static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event, struct pqi_event_response *response)
{
	switch (event->event_id) {
	case PQI_EVENT_OFA_MEMORY_ALLOCATION:
		ctrl_info->ofa_bytes_requested =
			get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested);
		break;
	case PQI_EVENT_OFA_CANCELED:
		ctrl_info->ofa_cancel_reason =
			get_unaligned_le16(&response->data.ofa_cancelled.reason);
		break;
	}
}
static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *event;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = readl(event_queue->oq_pi);
		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
			pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE);
			dev_err(&ctrl_info->pci_dev->dev,
				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
			return -1;
		}

		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index = pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0 && response->request_acknowledge) {
			event = &ctrl_info->events[event_index];
			event->pending = true;
			event->event_type = response->event_type;
			event->event_id = get_unaligned_le16(&response->event_id);
			event->additional_event_id =
				get_unaligned_le32(&response->additional_event_id);
			if (event->event_type == PQI_EVENT_TYPE_OFA)
				pqi_ofa_capture_event_payload(ctrl_info, event, response);
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);
		schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
#define PQI_LEGACY_INTX_MASK	0x1

static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{
	u32 intx_mask;
	struct pqi_device_registers __iomem *pqi_registers;
	volatile void __iomem *register_addr;

	pqi_registers = ctrl_info->pqi_registers;

	if (enable_intx)
		register_addr = &pqi_registers->legacy_intx_mask_clear;
	else
		register_addr = &pqi_registers->legacy_intx_mask_set;

	intx_mask = readl(register_addr);
	intx_mask |= PQI_LEGACY_INTX_MASK;
	writel(intx_mask, register_addr);
}
static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_irq_mode new_mode)
{
	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		if (new_mode == IRQ_MODE_INTX) {
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
		}
		break;
	case IRQ_MODE_INTX:
		if (new_mode == IRQ_MODE_MSIX) {
			pqi_configure_legacy_intx(ctrl_info, false);
			sis_enable_msix(ctrl_info);
		} else if (new_mode == IRQ_MODE_NONE) {
			pqi_configure_legacy_intx(ctrl_info, false);
		}
		break;
	case IRQ_MODE_NONE:
		if (new_mode == IRQ_MODE_MSIX) {
			sis_enable_msix(ctrl_info);
		} else if (new_mode == IRQ_MODE_INTX) {
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
		}
		break;
	}

	ctrl_info->irq_mode = new_mode;
}
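
/*
 * Each transition above programs both sides of the switch: the PQI legacy
 * INTx mask register (via pqi_configure_legacy_intx()) and the SIS-side
 * enable (sis_enable_intx()/sis_enable_msix()), so the controller and the
 * host agree on the delivery method before irq_mode is updated.
 */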
#define PQI_LEGACY_INTX_PENDING		0x1

static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
{
	u32 intx_status;

	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		return true;
	case IRQ_MODE_INTX:
		intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status);
		return (intx_status & PQI_LEGACY_INTX_PENDING) != 0;
	default:
		return false;
	}
}
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	int num_io_responses_handled;
	int num_events_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!pqi_is_valid_irq(ctrl_info))
		return IRQ_NONE;

	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
	if (num_io_responses_handled < 0)
		goto out;

	if (irq == ctrl_info->event_irq) {
		num_events_handled = pqi_process_event_intr(ctrl_info);
		if (num_events_handled < 0)
			goto out;
	} else {
		num_events_handled = 0;
	}

	if (num_io_responses_handled + num_events_handled > 0)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

out:
	return IRQ_HANDLED;
}
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pci_dev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}
static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);

	ctrl_info->num_msix_vectors_initialized = 0;
}
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int num_vectors_enabled;
	unsigned int flags = PCI_IRQ_MSIX;

	if (!pqi_disable_managed_interrupts)
		flags |= PCI_IRQ_AFFINITY;

	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
			flags);
	if (num_vectors_enabled < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n",
			num_vectors_enabled);
		return num_vectors_enabled;
	}

	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
	ctrl_info->irq_mode = IRQ_MODE_MSIX;

	return 0;
}
static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->num_msix_vectors_enabled) {
		pci_free_irq_vectors(ctrl_info->pci_dev);
		ctrl_info->num_msix_vectors_enabled = 0;
	}
}
4124 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
4127 size_t alloc_length;
4128 size_t element_array_length_per_iq;
4129 size_t element_array_length_per_oq;
4130 void *element_array;
4131 void __iomem *next_queue_index;
4132 void *aligned_pointer;
4133 unsigned int num_inbound_queues;
4134 unsigned int num_outbound_queues;
4135 unsigned int num_queue_indexes;
4136 struct pqi_queue_group *queue_group;
4138 element_array_length_per_iq =
4139 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
4140 ctrl_info->num_elements_per_iq;
4141 element_array_length_per_oq =
4142 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
4143 ctrl_info->num_elements_per_oq;
4144 num_inbound_queues = ctrl_info->num_queue_groups * 2;
4145 num_outbound_queues = ctrl_info->num_queue_groups;
4146 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
4148 aligned_pointer = NULL;
4150 for (i = 0; i < num_inbound_queues; i++) {
4151 aligned_pointer = PTR_ALIGN(aligned_pointer,
4152 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4153 aligned_pointer += element_array_length_per_iq;
4156 for (i = 0; i < num_outbound_queues; i++) {
4157 aligned_pointer = PTR_ALIGN(aligned_pointer,
4158 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4159 aligned_pointer += element_array_length_per_oq;
4162 aligned_pointer = PTR_ALIGN(aligned_pointer,
4163 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4164 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4165 PQI_EVENT_OQ_ELEMENT_LENGTH;
4167 for (i = 0; i < num_queue_indexes; i++) {
4168 aligned_pointer = PTR_ALIGN(aligned_pointer,
4169 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4170 aligned_pointer += sizeof(pqi_index_t);
4173 alloc_length = (size_t)aligned_pointer +
4174 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4176 alloc_length += PQI_EXTRA_SGL_MEMORY;
4178 ctrl_info->queue_memory_base =
4179 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4180 &ctrl_info->queue_memory_base_dma_handle,
4183 if (!ctrl_info->queue_memory_base)
4186 ctrl_info->queue_memory_length = alloc_length;
4188 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
4189 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4191 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4192 queue_group = &ctrl_info->queue_groups[i];
4193 queue_group->iq_element_array[RAID_PATH] = element_array;
4194 queue_group->iq_element_array_bus_addr[RAID_PATH] =
4195 ctrl_info->queue_memory_base_dma_handle +
4196 (element_array - ctrl_info->queue_memory_base);
4197 element_array += element_array_length_per_iq;
4198 element_array = PTR_ALIGN(element_array,
4199 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4200 queue_group->iq_element_array[AIO_PATH] = element_array;
4201 queue_group->iq_element_array_bus_addr[AIO_PATH] =
4202 ctrl_info->queue_memory_base_dma_handle +
4203 (element_array - ctrl_info->queue_memory_base);
4204 element_array += element_array_length_per_iq;
4205 element_array = PTR_ALIGN(element_array,
4206 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4209 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4210 queue_group = &ctrl_info->queue_groups[i];
4211 queue_group->oq_element_array = element_array;
4212 queue_group->oq_element_array_bus_addr =
4213 ctrl_info->queue_memory_base_dma_handle +
4214 (element_array - ctrl_info->queue_memory_base);
4215 element_array += element_array_length_per_oq;
4216 element_array = PTR_ALIGN(element_array,
4217 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4220 ctrl_info->event_queue.oq_element_array = element_array;
4221 ctrl_info->event_queue.oq_element_array_bus_addr =
4222 ctrl_info->queue_memory_base_dma_handle +
4223 (element_array - ctrl_info->queue_memory_base);
4224 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
4225 PQI_EVENT_OQ_ELEMENT_LENGTH;
4227 next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
4228 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4230 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4231 queue_group = &ctrl_info->queue_groups[i];
4232 queue_group->iq_ci[RAID_PATH] = next_queue_index;
4233 queue_group->iq_ci_bus_addr[RAID_PATH] =
4234 ctrl_info->queue_memory_base_dma_handle +
4235 ((void __iomem *)queue_group->iq_ci[RAID_PATH] -
4236 (void __iomem *)ctrl_info->queue_memory_base);
4237 next_queue_index += sizeof(pqi_index_t);
4238 next_queue_index = PTR_ALIGN(next_queue_index,
4239 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4240 queue_group->iq_ci[AIO_PATH] = next_queue_index;
4241 queue_group->iq_ci_bus_addr[AIO_PATH] =
4242 ctrl_info->queue_memory_base_dma_handle +
4243 ((void __iomem *)queue_group->iq_ci[AIO_PATH] -
4244 (void __iomem *)ctrl_info->queue_memory_base);
4245 next_queue_index += sizeof(pqi_index_t);
4246 next_queue_index = PTR_ALIGN(next_queue_index,
4247 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4248 queue_group->oq_pi = next_queue_index;
4249 queue_group->oq_pi_bus_addr =
4250 ctrl_info->queue_memory_base_dma_handle +
4251 ((void __iomem *)queue_group->oq_pi -
4252 (void __iomem *)ctrl_info->queue_memory_base);
4253 next_queue_index += sizeof(pqi_index_t);
4254 next_queue_index = PTR_ALIGN(next_queue_index,
4255 PQI_OPERATIONAL_INDEX_ALIGNMENT);
4256 }
4258 ctrl_info->event_queue.oq_pi = next_queue_index;
4259 ctrl_info->event_queue.oq_pi_bus_addr =
4260 ctrl_info->queue_memory_base_dma_handle +
4261 ((void __iomem *)ctrl_info->event_queue.oq_pi -
4262 (void __iomem *)ctrl_info->queue_memory_base);
4264 return 0;
4265 }
4267 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
4268 {
4269 unsigned int i;
4270 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4271 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
4273 /*
4274 * Initialize the backpointers to the controller structure in
4275 * each operational queue group structure.
4276 */
4277 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4278 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
4280 /*
4281 * Assign IDs to all operational queues. Note that the IDs
4282 * assigned to operational IQs are independent of the IDs
4283 * assigned to operational OQs.
4284 */
4285 ctrl_info->event_queue.oq_id = next_oq_id++;
4286 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4287 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
4288 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
4289 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
4292 /*
4293 * Assign MSI-X table entry indexes to all queues. Note that the
4294 * interrupt for the event queue is shared with the first queue group.
4295 */
4296 ctrl_info->event_queue.int_msg_num = 0;
4297 for (i = 0; i < ctrl_info->num_queue_groups; i++)
4298 ctrl_info->queue_groups[i].int_msg_num = i;
4300 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4301 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
4302 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
4303 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
4304 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
4305 }
4306 }
4308 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
4309 {
4310 size_t alloc_length;
4311 struct pqi_admin_queues_aligned *admin_queues_aligned;
4312 struct pqi_admin_queues *admin_queues;
4314 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
4315 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
4317 ctrl_info->admin_queue_memory_base =
4318 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
4319 &ctrl_info->admin_queue_memory_base_dma_handle,
4320 GFP_KERNEL);
4322 if (!ctrl_info->admin_queue_memory_base)
4323 return -ENOMEM;
4325 ctrl_info->admin_queue_memory_length = alloc_length;
4327 admin_queues = &ctrl_info->admin_queues;
4328 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
4329 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
4330 admin_queues->iq_element_array =
4331 &admin_queues_aligned->iq_element_array;
4332 admin_queues->oq_element_array =
4333 &admin_queues_aligned->oq_element_array;
4334 admin_queues->iq_ci =
4335 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci;
4336 admin_queues->oq_pi =
4337 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;
4339 admin_queues->iq_element_array_bus_addr =
4340 ctrl_info->admin_queue_memory_base_dma_handle +
4341 (admin_queues->iq_element_array -
4342 ctrl_info->admin_queue_memory_base);
4343 admin_queues->oq_element_array_bus_addr =
4344 ctrl_info->admin_queue_memory_base_dma_handle +
4345 (admin_queues->oq_element_array -
4346 ctrl_info->admin_queue_memory_base);
4347 admin_queues->iq_ci_bus_addr =
4348 ctrl_info->admin_queue_memory_base_dma_handle +
4349 ((void __iomem *)admin_queues->iq_ci -
4350 (void __iomem *)ctrl_info->admin_queue_memory_base);
4351 admin_queues->oq_pi_bus_addr =
4352 ctrl_info->admin_queue_memory_base_dma_handle +
4353 ((void __iomem *)admin_queues->oq_pi -
4354 (void __iomem *)ctrl_info->admin_queue_memory_base);
4356 return 0;
4357 }
4359 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
4360 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
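/*
 * The function below implements the admin queue-pair creation handshake:
 * the element array and index addresses are written to the admin queue
 * registers, PQI_CREATE_ADMIN_QUEUE_PAIR is written to the
 * function_and_status_code register, and that register is then polled
 * (once per millisecond, for up to roughly one second) until the
 * controller reports PQI_STATUS_IDLE.
 */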
4362 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
4363 {
4364 struct pqi_device_registers __iomem *pqi_registers;
4365 struct pqi_admin_queues *admin_queues;
4366 unsigned long timeout;
4367 u8 status;
4368 u32 reg;
4370 pqi_registers = ctrl_info->pqi_registers;
4371 admin_queues = &ctrl_info->admin_queues;
4373 writeq((u64)admin_queues->iq_element_array_bus_addr,
4374 &pqi_registers->admin_iq_element_array_addr);
4375 writeq((u64)admin_queues->oq_element_array_bus_addr,
4376 &pqi_registers->admin_oq_element_array_addr);
4377 writeq((u64)admin_queues->iq_ci_bus_addr,
4378 &pqi_registers->admin_iq_ci_addr);
4379 writeq((u64)admin_queues->oq_pi_bus_addr,
4380 &pqi_registers->admin_oq_pi_addr);
4382 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
4383 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
4384 (admin_queues->int_msg_num << 16);
4385 writel(reg, &pqi_registers->admin_iq_num_elements);
4387 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
4388 &pqi_registers->function_and_status_code);
4390 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
4391 while (1) {
4392 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
4393 status = readb(&pqi_registers->function_and_status_code);
4394 if (status == PQI_STATUS_IDLE)
4395 break;
4396 if (time_after(jiffies, timeout))
4397 return -ETIMEDOUT;
4398 }
4400 /*
4401 * The offset registers are not initialized to the correct
4402 * offsets until *after* the create admin queue pair command
4403 * completes successfully.
4404 */
4405 admin_queues->iq_pi = ctrl_info->iomem_base +
4406 PQI_DEVICE_REGISTERS_OFFSET +
4407 readq(&pqi_registers->admin_iq_pi_offset);
4408 admin_queues->oq_ci = ctrl_info->iomem_base +
4409 PQI_DEVICE_REGISTERS_OFFSET +
4410 readq(&pqi_registers->admin_oq_ci_offset);
4412 return 0;
4413 }
4415 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
4416 struct pqi_general_admin_request *request)
4417 {
4418 struct pqi_admin_queues *admin_queues;
4419 void *next_element;
4420 pqi_index_t iq_pi;
4422 admin_queues = &ctrl_info->admin_queues;
4423 iq_pi = admin_queues->iq_pi_copy;
4425 next_element = admin_queues->iq_element_array +
4426 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
4428 memcpy(next_element, request, sizeof(*request));
4430 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
4431 admin_queues->iq_pi_copy = iq_pi;
4433 /*
4434 * This write notifies the controller that an IU is available to be
4435 * processed.
4436 */
4437 writel(iq_pi, admin_queues->iq_pi);
4438 }
4440 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
4442 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
4443 struct pqi_general_admin_response *response)
4444 {
4445 struct pqi_admin_queues *admin_queues;
4446 pqi_index_t oq_pi;
4447 pqi_index_t oq_ci;
4448 unsigned long timeout;
4450 admin_queues = &ctrl_info->admin_queues;
4451 oq_ci = admin_queues->oq_ci_copy;
4453 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
4455 while (1) {
4456 oq_pi = readl(admin_queues->oq_pi);
4457 if (oq_pi != oq_ci)
4458 break;
4459 if (time_after(jiffies, timeout)) {
4460 dev_err(&ctrl_info->pci_dev->dev,
4461 "timed out waiting for admin response\n");
4462 return -ETIMEDOUT;
4463 }
4464 if (!sis_is_firmware_running(ctrl_info))
4465 return -ENXIO;
4466 usleep_range(1000, 2000);
4467 }
4469 memcpy(response, admin_queues->oq_element_array +
4470 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
4472 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
4473 admin_queues->oq_ci_copy = oq_ci;
4474 writel(oq_ci, admin_queues->oq_ci);
4476 return 0;
4477 }
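/*
 * Producer/consumer protocol used above: the controller advances the OQ
 * producer index (oq_pi) in host memory as it posts responses; the host
 * consumes an element, advances its consumer-index copy modulo the queue
 * depth, and writes it back to the oq_ci register so the controller can
 * reuse the slot.
 */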
4479 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
4480 struct pqi_queue_group *queue_group, enum pqi_io_path path,
4481 struct pqi_io_request *io_request)
4482 {
4483 struct pqi_io_request *next;
4484 void *next_element;
4485 pqi_index_t iq_pi;
4486 pqi_index_t iq_ci;
4487 size_t iu_length;
4488 unsigned long flags;
4489 unsigned int num_elements_needed;
4490 unsigned int num_elements_to_end_of_queue;
4491 size_t copy_count;
4492 struct pqi_iu_header *request;
4494 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
4496 if (io_request) {
4497 io_request->queue_group = queue_group;
4498 list_add_tail(&io_request->request_list_entry,
4499 &queue_group->request_list[path]);
4500 }
4502 iq_pi = queue_group->iq_pi_copy[path];
4504 list_for_each_entry_safe(io_request, next,
4505 &queue_group->request_list[path], request_list_entry) {
4507 request = io_request->iu;
4509 iu_length = get_unaligned_le16(&request->iu_length) +
4510 PQI_REQUEST_HEADER_LENGTH;
4511 num_elements_needed =
4512 DIV_ROUND_UP(iu_length,
4513 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4515 iq_ci = readl(queue_group->iq_ci[path]);
4517 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
4518 ctrl_info->num_elements_per_iq))
4519 break;
4521 put_unaligned_le16(queue_group->oq_id,
4522 &request->response_queue_id);
4524 next_element = queue_group->iq_element_array[path] +
4525 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4527 num_elements_to_end_of_queue =
4528 ctrl_info->num_elements_per_iq - iq_pi;
4530 if (num_elements_needed <= num_elements_to_end_of_queue) {
4531 memcpy(next_element, request, iu_length);
4532 } else {
4533 copy_count = num_elements_to_end_of_queue *
4534 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4535 memcpy(next_element, request, copy_count);
4536 memcpy(queue_group->iq_element_array[path],
4537 (u8 *)request + copy_count,
4538 iu_length - copy_count);
4539 }
4541 iq_pi = (iq_pi + num_elements_needed) %
4542 ctrl_info->num_elements_per_iq;
4544 list_del(&io_request->request_list_entry);
4545 }
4547 if (iq_pi != queue_group->iq_pi_copy[path]) {
4548 queue_group->iq_pi_copy[path] = iq_pi;
4549 /*
4550 * This write notifies the controller that one or more IUs are
4551 * available to be processed.
4552 */
4553 writel(iq_pi, queue_group->iq_pi[path]);
4554 }
4556 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
4557 }
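/*
 * Illustrative ring arithmetic for the spanning copy above (hypothetical
 * values): with 32 elements per IQ, iq_pi = 30, and an IU needing 4
 * elements, 2 elements' worth of data is copied at the tail of the
 * element array, the remaining bytes wrap to the head of the array, and
 * iq_pi becomes (30 + 4) % 32 = 2.
 */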
4559 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
4561 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
4562 struct completion *wait)
4563 {
4564 int rc;
4566 while (1) {
4567 if (wait_for_completion_io_timeout(wait,
4568 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
4569 rc = 0;
4570 break;
4571 }
4573 pqi_check_ctrl_health(ctrl_info);
4574 if (pqi_ctrl_offline(ctrl_info)) {
4575 rc = -ENXIO;
4576 break;
4577 }
4578 }
4580 return rc;
4581 }
4583 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
4584 void *context)
4585 {
4586 struct completion *waiting = context;
4588 complete(waiting);
4589 }
4591 static int pqi_process_raid_io_error_synchronous(
4592 struct pqi_raid_error_info *error_info)
4593 {
4594 int rc = -EIO;
4596 switch (error_info->data_out_result) {
4597 case PQI_DATA_IN_OUT_GOOD:
4598 if (error_info->status == SAM_STAT_GOOD)
4599 rc = 0;
4600 break;
4601 case PQI_DATA_IN_OUT_UNDERFLOW:
4602 if (error_info->status == SAM_STAT_GOOD ||
4603 error_info->status == SAM_STAT_CHECK_CONDITION)
4604 rc = 0;
4605 break;
4606 case PQI_DATA_IN_OUT_ABORTED:
4607 rc = PQI_CMD_STATUS_ABORTED;
4608 break;
4609 }
4611 return rc;
4612 }
4614 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
4615 {
4616 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0;
4617 }
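/*
 * Overview of the synchronous RAID-path submission below: requests are
 * serialized by sync_request_sem (optionally interruptible), blockable
 * requests wait while the controller is blocked, the IU is copied into an
 * allocated io_request, and the caller sleeps on an on-stack completion
 * that fires from pqi_raid_synchronous_complete().
 */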
4619 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
4620 struct pqi_iu_header *request, unsigned int flags,
4621 struct pqi_raid_error_info *error_info)
4622 {
4623 int rc = 0;
4624 struct pqi_io_request *io_request;
4625 size_t iu_length;
4626 DECLARE_COMPLETION_ONSTACK(wait);
4628 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
4629 if (down_interruptible(&ctrl_info->sync_request_sem))
4630 return -ERESTARTSYS;
4631 } else {
4632 down(&ctrl_info->sync_request_sem);
4633 }
4635 pqi_ctrl_busy(ctrl_info);
4636 /*
4637 * Wait for other admin queue updates such as:
4638 * config table changes, OFA memory updates, ...
4639 */
4640 if (pqi_is_blockable_request(request))
4641 pqi_wait_if_ctrl_blocked(ctrl_info);
4643 if (pqi_ctrl_offline(ctrl_info)) {
4644 rc = -ENXIO;
4645 goto out;
4646 }
4648 io_request = pqi_alloc_io_request(ctrl_info, NULL);
4650 put_unaligned_le16(io_request->index,
4651 &(((struct pqi_raid_path_request *)request)->request_id));
4653 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
4654 ((struct pqi_raid_path_request *)request)->error_index =
4655 ((struct pqi_raid_path_request *)request)->request_id;
4657 iu_length = get_unaligned_le16(&request->iu_length) +
4658 PQI_REQUEST_HEADER_LENGTH;
4659 memcpy(io_request->iu, request, iu_length);
4661 io_request->io_complete_callback = pqi_raid_synchronous_complete;
4662 io_request->context = &wait;
4664 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
4665 io_request);
4667 pqi_wait_for_completion_io(ctrl_info, &wait);
4669 if (error_info) {
4670 if (io_request->error_info)
4671 memcpy(error_info, io_request->error_info, sizeof(*error_info));
4672 else
4673 memset(error_info, 0, sizeof(*error_info));
4674 } else if (rc == 0 && io_request->error_info) {
4675 rc = pqi_process_raid_io_error_synchronous(io_request->error_info);
4676 }
4678 pqi_free_io_request(io_request);
4680 out:
4681 pqi_ctrl_unbusy(ctrl_info);
4682 up(&ctrl_info->sync_request_sem);
4684 return rc;
4685 }
4687 static int pqi_validate_admin_response(
4688 struct pqi_general_admin_response *response, u8 expected_function_code)
4689 {
4690 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
4691 return -EINVAL;
4693 if (get_unaligned_le16(&response->header.iu_length) !=
4694 PQI_GENERAL_ADMIN_IU_LENGTH)
4695 return -EINVAL;
4697 if (response->function_code != expected_function_code)
4698 return -EINVAL;
4700 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
4701 return -EINVAL;
4703 return 0;
4704 }
4706 static int pqi_submit_admin_request_synchronous(
4707 struct pqi_ctrl_info *ctrl_info,
4708 struct pqi_general_admin_request *request,
4709 struct pqi_general_admin_response *response)
4710 {
4711 int rc;
4713 pqi_submit_admin_request(ctrl_info, request);
4715 rc = pqi_poll_for_admin_response(ctrl_info, response);
4717 if (rc == 0)
4718 rc = pqi_validate_admin_response(response, request->function_code);
4720 return rc;
4721 }
4723 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
4724 {
4725 int rc;
4726 struct pqi_general_admin_request request;
4727 struct pqi_general_admin_response response;
4728 struct pqi_device_capability *capability;
4729 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
4731 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
4732 if (!capability)
4733 return -ENOMEM;
4735 memset(&request, 0, sizeof(request));
4737 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4738 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4739 &request.header.iu_length);
4740 request.function_code =
4741 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
4742 put_unaligned_le32(sizeof(*capability),
4743 &request.data.report_device_capability.buffer_length);
4745 rc = pqi_map_single(ctrl_info->pci_dev,
4746 &request.data.report_device_capability.sg_descriptor,
4747 capability, sizeof(*capability),
4748 DMA_FROM_DEVICE);
4749 if (rc)
4750 goto out;
4752 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response);
4754 pqi_pci_unmap(ctrl_info->pci_dev,
4755 &request.data.report_device_capability.sg_descriptor, 1,
4756 DMA_FROM_DEVICE);
4758 if (rc)
4759 goto out;
4761 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
4762 rc = -EIO;
4763 goto out;
4764 }
4766 ctrl_info->max_inbound_queues =
4767 get_unaligned_le16(&capability->max_inbound_queues);
4768 ctrl_info->max_elements_per_iq =
4769 get_unaligned_le16(&capability->max_elements_per_iq);
4770 ctrl_info->max_iq_element_length =
4771 get_unaligned_le16(&capability->max_iq_element_length)
4772 * 16;
4773 ctrl_info->max_outbound_queues =
4774 get_unaligned_le16(&capability->max_outbound_queues);
4775 ctrl_info->max_elements_per_oq =
4776 get_unaligned_le16(&capability->max_elements_per_oq);
4777 ctrl_info->max_oq_element_length =
4778 get_unaligned_le16(&capability->max_oq_element_length)
4779 * 16;
4781 sop_iu_layer_descriptor =
4782 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
4784 ctrl_info->max_inbound_iu_length_per_firmware =
4785 get_unaligned_le16(
4786 &sop_iu_layer_descriptor->max_inbound_iu_length);
4787 ctrl_info->inbound_spanning_supported =
4788 sop_iu_layer_descriptor->inbound_spanning_supported;
4789 ctrl_info->outbound_spanning_supported =
4790 sop_iu_layer_descriptor->outbound_spanning_supported;
4792 out:
4793 kfree(capability);
4795 return rc;
4796 }
4798 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
4799 {
4800 if (ctrl_info->max_iq_element_length <
4801 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4802 dev_err(&ctrl_info->pci_dev->dev,
4803 "max. inbound queue element length of %d is less than the required length of %d\n",
4804 ctrl_info->max_iq_element_length,
4805 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4806 return -EINVAL;
4807 }
4809 if (ctrl_info->max_oq_element_length <
4810 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
4811 dev_err(&ctrl_info->pci_dev->dev,
4812 "max. outbound queue element length of %d is less than the required length of %d\n",
4813 ctrl_info->max_oq_element_length,
4814 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
4815 return -EINVAL;
4816 }
4818 if (ctrl_info->max_inbound_iu_length_per_firmware <
4819 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
4820 dev_err(&ctrl_info->pci_dev->dev,
4821 "max. inbound IU length of %u is less than the min. required length of %d\n",
4822 ctrl_info->max_inbound_iu_length_per_firmware,
4823 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4824 return -EINVAL;
4825 }
4827 if (!ctrl_info->inbound_spanning_supported) {
4828 dev_err(&ctrl_info->pci_dev->dev,
4829 "the controller does not support inbound spanning\n");
4830 return -EINVAL;
4831 }
4833 if (ctrl_info->outbound_spanning_supported) {
4834 dev_err(&ctrl_info->pci_dev->dev,
4835 "the controller supports outbound spanning but this driver does not\n");
4836 return -EINVAL;
4837 }
4839 return 0;
4840 }
4842 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
4843 {
4844 int rc;
4845 struct pqi_event_queue *event_queue;
4846 struct pqi_general_admin_request request;
4847 struct pqi_general_admin_response response;
4849 event_queue = &ctrl_info->event_queue;
4851 /*
4852 * Create OQ (Outbound Queue - device to host queue) to dedicate
4853 * to events.
4854 */
4855 memset(&request, 0, sizeof(request));
4856 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4857 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4858 &request.header.iu_length);
4859 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4860 put_unaligned_le16(event_queue->oq_id,
4861 &request.data.create_operational_oq.queue_id);
4862 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
4863 &request.data.create_operational_oq.element_array_addr);
4864 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
4865 &request.data.create_operational_oq.pi_addr);
4866 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
4867 &request.data.create_operational_oq.num_elements);
4868 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
4869 &request.data.create_operational_oq.element_length);
4870 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4871 put_unaligned_le16(event_queue->int_msg_num,
4872 &request.data.create_operational_oq.int_msg_num);
4874 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4875 &response);
4876 if (rc)
4877 return rc;
4879 event_queue->oq_ci = ctrl_info->iomem_base +
4880 PQI_DEVICE_REGISTERS_OFFSET +
4881 get_unaligned_le64(
4882 &response.data.create_operational_oq.oq_ci_offset);
4884 return 0;
4885 }
4887 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
4888 unsigned int group_number)
4889 {
4890 int rc;
4891 struct pqi_queue_group *queue_group;
4892 struct pqi_general_admin_request request;
4893 struct pqi_general_admin_response response;
4895 queue_group = &ctrl_info->queue_groups[group_number];
4897 /*
4898 * Create IQ (Inbound Queue - host to device queue) for
4899 * RAID path.
4900 */
4901 memset(&request, 0, sizeof(request));
4902 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4903 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4904 &request.header.iu_length);
4905 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4906 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
4907 &request.data.create_operational_iq.queue_id);
4908 put_unaligned_le64(
4909 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
4910 &request.data.create_operational_iq.element_array_addr);
4911 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
4912 &request.data.create_operational_iq.ci_addr);
4913 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4914 &request.data.create_operational_iq.num_elements);
4915 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4916 &request.data.create_operational_iq.element_length);
4917 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4919 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4920 &response);
4921 if (rc) {
4922 dev_err(&ctrl_info->pci_dev->dev,
4923 "error creating inbound RAID queue\n");
4924 return rc;
4925 }
4927 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
4928 PQI_DEVICE_REGISTERS_OFFSET +
4929 get_unaligned_le64(
4930 &response.data.create_operational_iq.iq_pi_offset);
4932 /*
4933 * Create IQ (Inbound Queue - host to device queue) for
4934 * Advanced I/O (AIO) path.
4935 */
4936 memset(&request, 0, sizeof(request));
4937 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4938 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4939 &request.header.iu_length);
4940 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
4941 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4942 &request.data.create_operational_iq.queue_id);
4943 put_unaligned_le64((u64)queue_group->
4944 iq_element_array_bus_addr[AIO_PATH],
4945 &request.data.create_operational_iq.element_array_addr);
4946 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4947 &request.data.create_operational_iq.ci_addr);
4948 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4949 &request.data.create_operational_iq.num_elements);
4950 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4951 &request.data.create_operational_iq.element_length);
4952 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4954 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4955 &response);
4956 if (rc) {
4957 dev_err(&ctrl_info->pci_dev->dev,
4958 "error creating inbound AIO queue\n");
4959 return rc;
4960 }
4962 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4963 PQI_DEVICE_REGISTERS_OFFSET +
4964 get_unaligned_le64(
4965 &response.data.create_operational_iq.iq_pi_offset);
4967 /*
4968 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4969 * assumed to be for RAID path I/O unless we change the queue's
4970 * property.
4971 */
4972 memset(&request, 0, sizeof(request));
4973 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4974 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4975 &request.header.iu_length);
4976 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4977 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4978 &request.data.change_operational_iq_properties.queue_id);
4979 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4980 &request.data.change_operational_iq_properties.vendor_specific);
4982 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4983 &response);
4984 if (rc) {
4985 dev_err(&ctrl_info->pci_dev->dev,
4986 "error changing queue property\n");
4987 return rc;
4988 }
4990 /*
4991 * Create OQ (Outbound Queue - device to host queue).
4992 */
4993 memset(&request, 0, sizeof(request));
4994 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4995 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4996 &request.header.iu_length);
4997 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4998 put_unaligned_le16(queue_group->oq_id,
4999 &request.data.create_operational_oq.queue_id);
5000 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
5001 &request.data.create_operational_oq.element_array_addr);
5002 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
5003 &request.data.create_operational_oq.pi_addr);
5004 put_unaligned_le16(ctrl_info->num_elements_per_oq,
5005 &request.data.create_operational_oq.num_elements);
5006 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
5007 &request.data.create_operational_oq.element_length);
5008 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
5009 put_unaligned_le16(queue_group->int_msg_num,
5010 &request.data.create_operational_oq.int_msg_num);
5012 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
5013 &response);
5014 if (rc) {
5015 dev_err(&ctrl_info->pci_dev->dev,
5016 "error creating outbound queue\n");
5017 return rc;
5018 }
5020 queue_group->oq_ci = ctrl_info->iomem_base +
5021 PQI_DEVICE_REGISTERS_OFFSET +
5022 get_unaligned_le64(
5023 &response.data.create_operational_oq.oq_ci_offset);
5025 return 0;
5026 }
5028 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
5029 {
5030 int rc;
5031 unsigned int i;
5033 rc = pqi_create_event_queue(ctrl_info);
5034 if (rc) {
5035 dev_err(&ctrl_info->pci_dev->dev,
5036 "error creating event queue\n");
5037 return rc;
5038 }
5040 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5041 rc = pqi_create_queue_group(ctrl_info, i);
5042 if (rc) {
5043 dev_err(&ctrl_info->pci_dev->dev,
5044 "error creating queue group number %u/%u\n",
5045 i, ctrl_info->num_queue_groups);
5046 return rc;
5047 }
5048 }
5050 return 0;
5051 }
5053 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
5054 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
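/*
 * struct_size_t() sizes the buffer as one struct pqi_event_config header
 * plus a trailing array of PQI_MAX_EVENT_DESCRIPTORS event descriptors,
 * so the same buffer can serve both the report and set
 * event-configuration requests below.
 */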
5056 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
5057 bool enable_events)
5058 {
5059 int rc;
5060 unsigned int i;
5061 struct pqi_event_config *event_config;
5062 struct pqi_event_descriptor *event_descriptor;
5063 struct pqi_general_management_request request;
5065 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5066 GFP_KERNEL);
5067 if (!event_config)
5068 return -ENOMEM;
5070 memset(&request, 0, sizeof(request));
5072 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
5073 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5074 data.report_event_configuration.sg_descriptors[1]) -
5075 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5076 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5077 &request.data.report_event_configuration.buffer_length);
5079 rc = pqi_map_single(ctrl_info->pci_dev,
5080 request.data.report_event_configuration.sg_descriptors,
5081 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5082 DMA_FROM_DEVICE);
5083 if (rc)
5084 goto out;
5086 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5088 pqi_pci_unmap(ctrl_info->pci_dev,
5089 request.data.report_event_configuration.sg_descriptors, 1,
5090 DMA_FROM_DEVICE);
5092 if (rc)
5093 goto out;
5095 for (i = 0; i < event_config->num_event_descriptors; i++) {
5096 event_descriptor = &event_config->descriptors[i];
5097 if (enable_events &&
5098 pqi_is_supported_event(event_descriptor->event_type))
5099 put_unaligned_le16(ctrl_info->event_queue.oq_id,
5100 &event_descriptor->oq_id);
5101 else
5102 put_unaligned_le16(0, &event_descriptor->oq_id);
5103 }
5105 memset(&request, 0, sizeof(request));
5107 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
5108 put_unaligned_le16(offsetof(struct pqi_general_management_request,
5109 data.report_event_configuration.sg_descriptors[1]) -
5110 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
5111 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5112 &request.data.report_event_configuration.buffer_length);
5114 rc = pqi_map_single(ctrl_info->pci_dev,
5115 request.data.report_event_configuration.sg_descriptors,
5116 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
5117 DMA_TO_DEVICE);
5118 if (rc)
5119 goto out;
5121 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
5123 pqi_pci_unmap(ctrl_info->pci_dev,
5124 request.data.report_event_configuration.sg_descriptors, 1,
5125 DMA_TO_DEVICE);
5127 out:
5128 kfree(event_config);
5130 return rc;
5131 }
5133 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
5134 {
5135 return pqi_configure_events(ctrl_info, true);
5136 }
5138 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
5139 {
5140 unsigned int i;
5141 struct device *dev;
5142 size_t sg_chain_buffer_length;
5143 struct pqi_io_request *io_request;
5145 if (!ctrl_info->io_request_pool)
5146 return;
5148 dev = &ctrl_info->pci_dev->dev;
5149 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5150 io_request = ctrl_info->io_request_pool;
5152 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5153 kfree(io_request->iu);
5154 if (!io_request->sg_chain_buffer)
5155 break;
5156 dma_free_coherent(dev, sg_chain_buffer_length,
5157 io_request->sg_chain_buffer,
5158 io_request->sg_chain_buffer_dma_handle);
5159 io_request++;
5160 }
5162 kfree(ctrl_info->io_request_pool);
5163 ctrl_info->io_request_pool = NULL;
5164 }
5166 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
5167 {
5168 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
5169 ctrl_info->error_buffer_length,
5170 &ctrl_info->error_buffer_dma_handle,
5171 GFP_KERNEL);
5172 if (!ctrl_info->error_buffer)
5173 return -ENOMEM;
5175 return 0;
5176 }
5178 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
5179 {
5180 unsigned int i;
5181 void *sg_chain_buffer;
5182 size_t sg_chain_buffer_length;
5183 dma_addr_t sg_chain_buffer_dma_handle;
5184 struct device *dev;
5185 struct pqi_io_request *io_request;
5187 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots,
5188 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
5190 if (!ctrl_info->io_request_pool) {
5191 dev_err(&ctrl_info->pci_dev->dev,
5192 "failed to allocate I/O request pool\n");
5193 goto error;
5194 }
5196 dev = &ctrl_info->pci_dev->dev;
5197 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
5198 io_request = ctrl_info->io_request_pool;
5200 for (i = 0; i < ctrl_info->max_io_slots; i++) {
5201 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
5203 if (!io_request->iu) {
5204 dev_err(&ctrl_info->pci_dev->dev,
5205 "failed to allocate IU buffers\n");
5206 goto error;
5207 }
5209 sg_chain_buffer = dma_alloc_coherent(dev,
5210 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
5211 GFP_KERNEL);
5213 if (!sg_chain_buffer) {
5214 dev_err(&ctrl_info->pci_dev->dev,
5215 "failed to allocate PQI scatter-gather chain buffers\n");
5216 goto error;
5217 }
5219 io_request->index = i;
5220 io_request->sg_chain_buffer = sg_chain_buffer;
5221 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle;
5222 io_request++;
5223 }
5225 return 0;
5227 error:
5228 pqi_free_all_io_requests(ctrl_info);
5230 return -ENOMEM;
5231 }
5233 /*
5234 * Calculate required resources that are sized based on max. outstanding
5235 * requests and max. transfer size.
5236 */
5238 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
5239 {
5240 u32 max_transfer_size;
5241 u32 max_sg_entries;
5243 ctrl_info->scsi_ml_can_queue =
5244 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
5245 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
5247 ctrl_info->error_buffer_length =
5248 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
5250 if (reset_devices)
5251 max_transfer_size = min(ctrl_info->max_transfer_size,
5252 PQI_MAX_TRANSFER_SIZE_KDUMP);
5253 else
5254 max_transfer_size = min(ctrl_info->max_transfer_size,
5255 PQI_MAX_TRANSFER_SIZE);
5257 max_sg_entries = max_transfer_size / PAGE_SIZE;
5259 /* +1 to cover when the buffer is not page-aligned. */
5260 max_sg_entries++;
5262 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
5264 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
5266 ctrl_info->sg_chain_buffer_length =
5267 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
5268 PQI_EXTRA_SGL_MEMORY;
5269 ctrl_info->sg_tablesize = max_sg_entries;
5270 ctrl_info->max_sectors = max_transfer_size / 512;
5271 }
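/*
 * Worked example for the math above (illustrative values, assuming 4 KiB
 * pages): a 1 MiB max transfer yields 256 SG entries, plus 1 for a
 * non-page-aligned buffer = 257; if that survives the min() against the
 * controller limit, max_transfer_size is recomputed as
 * (257 - 1) * 4096 = 1 MiB and max_sectors = 1 MiB / 512 = 2048.
 */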
5273 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
5274 {
5275 int num_queue_groups;
5276 u16 num_elements_per_iq;
5277 u16 num_elements_per_oq;
5279 if (reset_devices) {
5280 num_queue_groups = 1;
5281 } else {
5282 int num_cpus;
5283 int max_queue_groups;
5285 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
5286 ctrl_info->max_outbound_queues - 1);
5287 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
5289 num_cpus = num_online_cpus();
5290 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
5291 num_queue_groups = min(num_queue_groups, max_queue_groups);
5292 }
5294 ctrl_info->num_queue_groups = num_queue_groups;
5296 /*
5297 * Make sure that the max. inbound IU length is an even multiple
5298 * of our inbound element length.
5299 */
5300 ctrl_info->max_inbound_iu_length =
5301 (ctrl_info->max_inbound_iu_length_per_firmware /
5302 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
5303 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
5305 num_elements_per_iq =
5306 (ctrl_info->max_inbound_iu_length /
5307 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
5309 /* Add one because one element in each queue is unusable. */
5310 num_elements_per_iq++;
5312 num_elements_per_iq = min(num_elements_per_iq,
5313 ctrl_info->max_elements_per_iq);
5315 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
5316 num_elements_per_oq = min(num_elements_per_oq,
5317 ctrl_info->max_elements_per_oq);
5319 ctrl_info->num_elements_per_iq = num_elements_per_iq;
5320 ctrl_info->num_elements_per_oq = num_elements_per_oq;
5322 ctrl_info->max_sg_per_iu =
5323 ((ctrl_info->max_inbound_iu_length -
5324 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5325 sizeof(struct pqi_sg_descriptor)) +
5326 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
5328 ctrl_info->max_sg_per_r56_iu =
5329 ((ctrl_info->max_inbound_iu_length -
5330 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
5331 sizeof(struct pqi_sg_descriptor)) +
5332 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
5333 }
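/*
 * Worked example for the element math above (illustrative values): if the
 * firmware reports a max inbound IU length of 1040 bytes and the
 * operational IQ element length is 128 bytes, max_inbound_iu_length is
 * rounded down to 1024, giving 1024 / 128 = 8 elements per spanned IU,
 * plus 1 because one element in each queue is unusable, for 9 elements
 * per IQ (before clamping to max_elements_per_iq).
 */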
5335 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
5336 struct scatterlist *sg)
5337 {
5338 u64 address = (u64)sg_dma_address(sg);
5339 unsigned int length = sg_dma_len(sg);
5341 put_unaligned_le64(address, &sg_descriptor->address);
5342 put_unaligned_le32(length, &sg_descriptor->length);
5343 put_unaligned_le32(0, &sg_descriptor->flags);
5344 }
5346 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
5347 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
5348 int max_sg_per_iu, bool *chained)
5349 {
5350 int i;
5351 unsigned int num_sg_in_iu;
5353 *chained = false;
5354 i = 0;
5355 num_sg_in_iu = 0;
5356 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */
5358 while (1) {
5359 pqi_set_sg_descriptor(sg_descriptor, sg);
5360 if (!*chained)
5361 num_sg_in_iu++;
5362 i++;
5363 if (i == sg_count)
5364 break;
5365 sg_descriptor++;
5366 if (i == max_sg_per_iu) {
5367 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
5368 &sg_descriptor->address);
5369 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
5370 &sg_descriptor->length);
5371 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
5372 *chained = true;
5373 num_sg_in_iu++;
5374 sg_descriptor = io_request->sg_chain_buffer;
5375 }
5377 sg = sg_next(sg);
5378 }
5379 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
5381 return num_sg_in_iu;
5382 }
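/*
 * SG chaining in pqi_build_sg_list() above: up to max_sg_per_iu - 1
 * descriptors are embedded in the IU; if more are needed, the last
 * embedded slot is rewritten as a CISS_SG_CHAIN descriptor pointing at
 * the io_request's DMA-mapped chain buffer, which holds the remaining
 * descriptors. CISS_SG_LAST marks the final descriptor in either case.
 */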
5384 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
5385 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
5386 struct pqi_io_request *io_request)
5387 {
5388 u16 iu_length;
5389 int sg_count;
5390 bool chained;
5391 unsigned int num_sg_in_iu;
5392 struct scatterlist *sg;
5393 struct pqi_sg_descriptor *sg_descriptor;
5395 sg_count = scsi_dma_map(scmd);
5396 if (sg_count < 0)
5397 return sg_count;
5399 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5400 PQI_REQUEST_HEADER_LENGTH;
5402 if (sg_count == 0)
5403 goto out;
5405 sg = scsi_sglist(scmd);
5406 sg_descriptor = request->sg_descriptors;
5408 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5409 ctrl_info->max_sg_per_iu, &chained);
5411 request->partial = chained;
5412 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5414 out:
5415 put_unaligned_le16(iu_length, &request->header.iu_length);
5417 return 0;
5418 }
5420 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
5421 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
5422 struct pqi_io_request *io_request)
5423 {
5424 u16 iu_length;
5425 int sg_count;
5426 bool chained;
5427 unsigned int num_sg_in_iu;
5428 struct scatterlist *sg;
5429 struct pqi_sg_descriptor *sg_descriptor;
5431 sg_count = scsi_dma_map(scmd);
5432 if (sg_count < 0)
5433 return sg_count;
5435 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) -
5436 PQI_REQUEST_HEADER_LENGTH;
5437 num_sg_in_iu = 0;
5439 if (sg_count == 0)
5440 goto out;
5442 sg = scsi_sglist(scmd);
5443 sg_descriptor = request->sg_descriptors;
5445 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5446 ctrl_info->max_sg_per_iu, &chained);
5448 request->partial = chained;
5449 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5451 out:
5452 put_unaligned_le16(iu_length, &request->header.iu_length);
5453 request->num_sg_descriptors = num_sg_in_iu;
5455 return 0;
5456 }
5458 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
5459 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
5460 struct pqi_io_request *io_request)
5461 {
5462 u16 iu_length;
5463 int sg_count;
5464 bool chained;
5465 unsigned int num_sg_in_iu;
5466 struct scatterlist *sg;
5467 struct pqi_sg_descriptor *sg_descriptor;
5469 sg_count = scsi_dma_map(scmd);
5470 if (sg_count < 0)
5471 return sg_count;
5473 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
5474 PQI_REQUEST_HEADER_LENGTH;
5475 num_sg_in_iu = 0;
5477 if (sg_count != 0) {
5478 sg = scsi_sglist(scmd);
5479 sg_descriptor = request->sg_descriptors;
5481 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5482 ctrl_info->max_sg_per_r56_iu, &chained);
5484 request->partial = chained;
5485 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5486 }
5488 put_unaligned_le16(iu_length, &request->header.iu_length);
5489 request->num_sg_descriptors = num_sg_in_iu;
5491 return 0;
5492 }
5494 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
5495 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
5496 struct pqi_io_request *io_request)
5497 {
5498 u16 iu_length;
5499 int sg_count;
5500 bool chained;
5501 unsigned int num_sg_in_iu;
5502 struct scatterlist *sg;
5503 struct pqi_sg_descriptor *sg_descriptor;
5505 sg_count = scsi_dma_map(scmd);
5506 if (sg_count < 0)
5507 return sg_count;
5509 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
5510 PQI_REQUEST_HEADER_LENGTH;
5512 num_sg_in_iu = 0;
5514 if (sg_count == 0)
5515 goto out;
5516 sg = scsi_sglist(scmd);
5517 sg_descriptor = request->sg_descriptors;
5519 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
5520 ctrl_info->max_sg_per_iu, &chained);
5522 request->partial = chained;
5523 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
5525 out:
5526 put_unaligned_le16(iu_length, &request->header.iu_length);
5527 request->num_sg_descriptors = num_sg_in_iu;
5529 return 0;
5530 }
5532 static void pqi_raid_io_complete(struct pqi_io_request *io_request,
5533 void *context)
5534 {
5535 struct scsi_cmnd *scmd;
5537 scmd = io_request->scmd;
5538 pqi_free_io_request(io_request);
5539 scsi_dma_unmap(scmd);
5540 pqi_scsi_done(scmd);
5541 }
5543 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
5544 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5545 struct pqi_queue_group *queue_group, bool io_high_prio)
5546 {
5547 int rc;
5548 size_t cdb_length;
5549 struct pqi_io_request *io_request;
5550 struct pqi_raid_path_request *request;
5552 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5553 if (!io_request)
5554 return SCSI_MLQUEUE_HOST_BUSY;
5556 io_request->io_complete_callback = pqi_raid_io_complete;
5557 io_request->scmd = scmd;
5559 request = io_request->iu;
5560 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors));
5562 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5563 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5564 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5565 request->command_priority = io_high_prio;
5566 put_unaligned_le16(io_request->index, &request->request_id);
5567 request->error_index = request->request_id;
5568 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
5569 request->ml_device_lun_number = (u8)scmd->device->lun;
5571 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
5572 memcpy(request->cdb, scmd->cmnd, cdb_length);
5574 switch (cdb_length) {
5575 case 6:
5576 case 10:
5577 case 12:
5578 case 16:
5579 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5580 break;
5581 case 20:
5582 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4;
5583 break;
5584 case 24:
5585 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8;
5586 break;
5587 case 28:
5588 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12;
5589 break;
5590 case 32:
5591 default:
5592 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16;
5593 break;
5594 }
5596 switch (scmd->sc_data_direction) {
5597 case DMA_FROM_DEVICE:
5598 request->data_direction = SOP_READ_FLAG;
5599 break;
5600 case DMA_TO_DEVICE:
5601 request->data_direction = SOP_WRITE_FLAG;
5602 break;
5603 case DMA_NONE:
5604 request->data_direction = SOP_NO_DIRECTION_FLAG;
5605 break;
5606 case DMA_BIDIRECTIONAL:
5607 request->data_direction = SOP_BIDIRECTIONAL;
5608 break;
5609 default:
5610 dev_err(&ctrl_info->pci_dev->dev,
5611 "unknown data direction: %d\n",
5612 scmd->sc_data_direction);
5613 break;
5614 }
5616 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
5617 if (rc) {
5618 pqi_free_io_request(io_request);
5619 return SCSI_MLQUEUE_HOST_BUSY;
5620 }
5622 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
5624 return 0;
5625 }
5627 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5628 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5629 struct pqi_queue_group *queue_group)
5630 {
5631 bool io_high_prio;
5633 io_high_prio = pqi_is_io_high_priority(device, scmd);
5635 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
5636 }
5638 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
5639 {
5640 struct scsi_cmnd *scmd;
5641 struct pqi_scsi_dev *device;
5642 struct pqi_ctrl_info *ctrl_info;
5644 if (!io_request->raid_bypass)
5645 return false;
5647 scmd = io_request->scmd;
5648 if ((scmd->result & 0xff) == SAM_STAT_GOOD)
5649 return false;
5650 if (host_byte(scmd->result) == DID_NO_CONNECT)
5651 return false;
5653 device = scmd->device->hostdata;
5654 if (pqi_device_offline(device) || pqi_device_in_remove(device))
5655 return false;
5657 ctrl_info = shost_to_hba(scmd->device->host);
5658 if (pqi_ctrl_offline(ctrl_info))
5659 return false;
5661 return true;
5662 }
5664 static void pqi_aio_io_complete(struct pqi_io_request *io_request,
5665 void *context)
5666 {
5667 struct scsi_cmnd *scmd;
5669 scmd = io_request->scmd;
5670 scsi_dma_unmap(scmd);
5671 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) {
5672 set_host_byte(scmd, DID_IMM_RETRY);
5673 pqi_cmd_priv(scmd)->this_residual++;
5674 }
5676 pqi_free_io_request(io_request);
5677 pqi_scsi_done(scmd);
5678 }
5680 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
5681 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
5682 struct pqi_queue_group *queue_group)
5683 {
5684 bool io_high_prio;
5686 io_high_prio = pqi_is_io_high_priority(device, scmd);
5688 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
5689 scmd->cmnd, scmd->cmd_len, queue_group, NULL,
5690 false, io_high_prio);
5691 }
5693 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
5694 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
5695 unsigned int cdb_length, struct pqi_queue_group *queue_group,
5696 struct pqi_encryption_info *encryption_info, bool raid_bypass,
5697 bool io_high_prio)
5698 {
5699 int rc;
5700 struct pqi_io_request *io_request;
5701 struct pqi_aio_path_request *request;
5703 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5704 if (!io_request)
5705 return SCSI_MLQUEUE_HOST_BUSY;
5707 io_request->io_complete_callback = pqi_aio_io_complete;
5708 io_request->scmd = scmd;
5709 io_request->raid_bypass = raid_bypass;
5711 request = io_request->iu;
5712 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors));
5714 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
5715 put_unaligned_le32(aio_handle, &request->nexus_id);
5716 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
5717 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5718 request->command_priority = io_high_prio;
5719 put_unaligned_le16(io_request->index, &request->request_id);
5720 request->error_index = request->request_id;
5721 if (!raid_bypass && ctrl_info->multi_lun_device_supported)
5722 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number);
5723 if (cdb_length > sizeof(request->cdb))
5724 cdb_length = sizeof(request->cdb);
5725 request->cdb_length = cdb_length;
5726 memcpy(request->cdb, cdb, cdb_length);
5728 switch (scmd->sc_data_direction) {
5729 case DMA_TO_DEVICE:
5730 request->data_direction = SOP_READ_FLAG;
5731 break;
5732 case DMA_FROM_DEVICE:
5733 request->data_direction = SOP_WRITE_FLAG;
5734 break;
5735 case DMA_NONE:
5736 request->data_direction = SOP_NO_DIRECTION_FLAG;
5737 break;
5738 case DMA_BIDIRECTIONAL:
5739 request->data_direction = SOP_BIDIRECTIONAL;
5740 break;
5741 default:
5742 dev_err(&ctrl_info->pci_dev->dev,
5743 "unknown data direction: %d\n",
5744 scmd->sc_data_direction);
5745 break;
5746 }
5748 if (encryption_info) {
5749 request->encryption_enable = true;
5750 put_unaligned_le16(encryption_info->data_encryption_key_index,
5751 &request->data_encryption_key_index);
5752 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5753 &request->encrypt_tweak_lower);
5754 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5755 &request->encrypt_tweak_upper);
5756 }
5758 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
5759 if (rc) {
5760 pqi_free_io_request(io_request);
5761 return SCSI_MLQUEUE_HOST_BUSY;
5762 }
5764 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5766 return 0;
5767 }
5769 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
5770 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5771 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5772 struct pqi_scsi_dev_raid_map_data *rmd)
5773 {
5774 int rc;
5775 struct pqi_io_request *io_request;
5776 struct pqi_aio_r1_path_request *r1_request;
5778 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5779 if (!io_request)
5780 return SCSI_MLQUEUE_HOST_BUSY;
5782 io_request->io_complete_callback = pqi_aio_io_complete;
5783 io_request->scmd = scmd;
5784 io_request->raid_bypass = true;
5786 r1_request = io_request->iu;
5787 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors));
5789 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO;
5790 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id);
5791 r1_request->num_drives = rmd->num_it_nexus_entries;
5792 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5793 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5794 if (rmd->num_it_nexus_entries == 3)
5795 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5797 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length);
5798 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5799 put_unaligned_le16(io_request->index, &r1_request->request_id);
5800 r1_request->error_index = r1_request->request_id;
5801 if (rmd->cdb_length > sizeof(r1_request->cdb))
5802 rmd->cdb_length = sizeof(r1_request->cdb);
5803 r1_request->cdb_length = rmd->cdb_length;
5804 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5806 /* The direction is always write. */
5807 r1_request->data_direction = SOP_READ_FLAG;
5809 if (encryption_info) {
5810 r1_request->encryption_enable = true;
5811 put_unaligned_le16(encryption_info->data_encryption_key_index,
5812 &r1_request->data_encryption_key_index);
5813 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5814 &r1_request->encrypt_tweak_lower);
5815 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5816 &r1_request->encrypt_tweak_upper);
5817 }
5819 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request);
5820 if (rc) {
5821 pqi_free_io_request(io_request);
5822 return SCSI_MLQUEUE_HOST_BUSY;
5823 }
5825 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5827 return 0;
5828 }
5830 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
5831 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
5832 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
5833 struct pqi_scsi_dev_raid_map_data *rmd)
5834 {
5835 int rc;
5836 struct pqi_io_request *io_request;
5837 struct pqi_aio_r56_path_request *r56_request;
5839 io_request = pqi_alloc_io_request(ctrl_info, scmd);
5840 if (!io_request)
5841 return SCSI_MLQUEUE_HOST_BUSY;
5842 io_request->io_complete_callback = pqi_aio_io_complete;
5843 io_request->scmd = scmd;
5844 io_request->raid_bypass = true;
5846 r56_request = io_request->iu;
5847 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
5849 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
5850 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
5851 else
5852 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
5854 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
5855 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5856 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5857 if (rmd->raid_level == SA_RAID_6) {
5858 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5859 r56_request->xor_multiplier = rmd->xor_mult;
5860 }
5861 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
5862 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5863 put_unaligned_le64(rmd->row, &r56_request->row);
5865 put_unaligned_le16(io_request->index, &r56_request->request_id);
5866 r56_request->error_index = r56_request->request_id;
5868 if (rmd->cdb_length > sizeof(r56_request->cdb))
5869 rmd->cdb_length = sizeof(r56_request->cdb);
5870 r56_request->cdb_length = rmd->cdb_length;
5871 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
5873 /* The direction is always write. */
5874 r56_request->data_direction = SOP_READ_FLAG;
5876 if (encryption_info) {
5877 r56_request->encryption_enable = true;
5878 put_unaligned_le16(encryption_info->data_encryption_key_index,
5879 &r56_request->data_encryption_key_index);
5880 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
5881 &r56_request->encrypt_tweak_lower);
5882 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
5883 &r56_request->encrypt_tweak_upper);
5884 }
5886 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
5887 if (rc) {
5888 pqi_free_io_request(io_request);
5889 return SCSI_MLQUEUE_HOST_BUSY;
5890 }
5892 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
5894 return 0;
5895 }
5897 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
5898 struct scsi_cmnd *scmd)
5899 {
5900 /*
5901 * We are setting host_tagset = 1 during init.
5902 */
5903 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd)));
5904 }
5906 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
5907 {
5908 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd)))
5909 return false;
5911 return pqi_cmd_priv(scmd)->this_residual == 0;
5912 }
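/*
 * A request is bypass-eligible only if it is a normal file-system request
 * (not passthrough) and has not already been retried after a failed
 * bypass attempt; pqi_aio_io_complete() bumps this_residual when it
 * requeues a bypassed command.
 */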
5914 /*
5915 * This function gets called just before we hand the completed SCSI request
5916 * back to the SML.
5917 */
5919 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
5920 {
5921 struct pqi_scsi_dev *device;
5922 struct completion *wait;
5924 if (!scmd->device) {
5925 set_host_byte(scmd, DID_NO_CONNECT);
5926 return;
5927 }
5929 device = scmd->device->hostdata;
5930 if (!device) {
5931 set_host_byte(scmd, DID_NO_CONNECT);
5932 return;
5933 }
5935 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]);
5937 wait = (struct completion *)xchg(&scmd->host_scribble, NULL);
5938 if (wait != PQI_NO_COMPLETION)
5939 complete(wait);
5940 }
5942 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
5943 struct scsi_cmnd *scmd)
5944 {
5945 u32 oldest_jiffies;
5946 u8 lru_index;
5947 int i;
5948 int rc;
5949 struct pqi_scsi_dev *device;
5950 struct pqi_stream_data *pqi_stream_data;
5951 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
5953 if (!ctrl_info->enable_stream_detection)
5954 return false;
5956 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5957 if (rc)
5958 return false;
5960 /* Check writes only. */
5961 if (!rmd.is_write)
5962 return false;
5964 device = scmd->device->hostdata;
5966 /* Check for RAID 5/6 streams. */
5967 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6)
5968 return false;
5970 /*
5971 * If the controller does not support AIO RAID{5,6} writes, the
5972 * requests need to be sent down the non-AIO path.
5973 */
5974 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) ||
5975 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes))
5976 return true;
5978 lru_index = 0;
5979 oldest_jiffies = INT_MAX;
5980 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) {
5981 pqi_stream_data = &device->stream_data[i];
5982 /*
5983 * Check whether the request is adjacent to or falls within
5984 * the previous request.
5985 */
5986 if ((pqi_stream_data->next_lba &&
5987 rmd.first_block >= pqi_stream_data->next_lba) &&
5988 rmd.first_block <= pqi_stream_data->next_lba +
5989 rmd.block_cnt) {
5990 pqi_stream_data->next_lba = rmd.first_block +
5991 rmd.block_cnt;
5992 pqi_stream_data->last_accessed = jiffies;
5993 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++;
5994 return true;
5995 }
5998 if (pqi_stream_data->last_accessed == 0) {
5999 lru_index = i;
6000 break;
6001 }
6003 /* Find entry with oldest last accessed time. */
6004 if (pqi_stream_data->last_accessed <= oldest_jiffies) {
6005 oldest_jiffies = pqi_stream_data->last_accessed;
6006 lru_index = i;
6007 }
6008 }
6010 /* Set LRU entry. */
6011 pqi_stream_data = &device->stream_data[lru_index];
6012 pqi_stream_data->last_accessed = jiffies;
6013 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
6015 return false;
6016 }
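/*
 * Summary of the heuristic above: each LUN tracks NUM_STREAMS_PER_LUN
 * candidate write streams with LRU replacement. A write that extends a
 * tracked stream returns true so the request is sent down the RAID path
 * instead of being bypassed, presumably allowing the firmware to coalesce
 * sequential writes into full-stripe RAID 5/6 writes.
 */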
6018 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
6019 {
6020 int rc;
6021 struct pqi_ctrl_info *ctrl_info;
6022 struct pqi_scsi_dev *device;
6023 u16 hw_queue;
6024 struct pqi_queue_group *queue_group;
6025 bool raid_bypassed;
6026 u8 lun;
6028 scmd->host_scribble = PQI_NO_COMPLETION;
6030 device = scmd->device->hostdata;
6032 if (!device) {
6033 set_host_byte(scmd, DID_NO_CONNECT);
6034 pqi_scsi_done(scmd);
6035 return 0;
6036 }
6038 lun = (u8)scmd->device->lun;
6040 atomic_inc(&device->scsi_cmds_outstanding[lun]);
6042 ctrl_info = shost_to_hba(shost);
6044 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) {
6045 set_host_byte(scmd, DID_NO_CONNECT);
6046 pqi_scsi_done(scmd);
6047 return 0;
6048 }
6050 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) {
6051 rc = SCSI_MLQUEUE_HOST_BUSY;
6052 goto out;
6053 }
6055 /*
6056 * This is necessary because the SML doesn't zero out this field during
6057 * error recovery.
6058 */
6059 scmd->result = 0;
6061 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
6062 queue_group = &ctrl_info->queue_groups[hw_queue];
6064 if (pqi_is_logical_device(device)) {
6065 raid_bypassed = false;
6066 if (device->raid_bypass_enabled &&
6067 pqi_is_bypass_eligible_request(scmd) &&
6068 !pqi_is_parity_write_stream(ctrl_info, scmd)) {
6069 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6070 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
6071 raid_bypassed = true;
6072 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++;
6073 }
6074 }
6075 if (!raid_bypassed)
6076 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6077 } else {
6078 if (device->aio_enabled)
6079 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6080 else
6081 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
6082 }
6084 out:
6085 if (rc) {
6086 scmd->host_scribble = NULL;
6087 atomic_dec(&device->scsi_cmds_outstanding[lun]);
6088 }
6090 return rc;
6091 }
6093 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
6094 {
6095 unsigned int i;
6096 unsigned int path;
6097 unsigned long flags;
6098 unsigned int queued_io_count;
6099 struct pqi_queue_group *queue_group;
6100 struct pqi_io_request *io_request;
6102 queued_io_count = 0;
6104 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6105 queue_group = &ctrl_info->queue_groups[i];
6106 for (path = 0; path < 2; path++) {
6107 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
6108 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry)
6109 queued_io_count++;
6110 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
6111 }
6112 }
6114 return queued_io_count;
6115 }
6117 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
6118 {
6119 unsigned int i;
6120 unsigned int path;
6121 unsigned int nonempty_inbound_queue_count;
6122 struct pqi_queue_group *queue_group;
6123 pqi_index_t iq_pi;
6124 pqi_index_t iq_ci;
6126 nonempty_inbound_queue_count = 0;
6128 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6129 queue_group = &ctrl_info->queue_groups[i];
6130 for (path = 0; path < 2; path++) {
6131 iq_pi = queue_group->iq_pi_copy[path];
6132 iq_ci = readl(queue_group->iq_ci[path]);
6133 if (iq_ci != iq_pi)
6134 nonempty_inbound_queue_count++;
6135 }
6136 }
6138 return nonempty_inbound_queue_count;
6139 }
6141 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10
6143 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
6144 {
6145 unsigned long start_jiffies;
6146 unsigned long warning_timeout;
6147 unsigned int queued_io_count;
6148 unsigned int nonempty_inbound_queue_count;
6149 bool displayed_warning;
6151 displayed_warning = false;
6152 start_jiffies = jiffies;
6153 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6155 while (1) {
6156 queued_io_count = pqi_queued_io_count(ctrl_info);
6157 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info);
6158 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0)
6159 break;
6160 pqi_check_ctrl_health(ctrl_info);
6161 if (pqi_ctrl_offline(ctrl_info))
6162 return -ENXIO;
6163 if (time_after(jiffies, warning_timeout)) {
6164 dev_warn(&ctrl_info->pci_dev->dev,
6165 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n",
6166 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count);
6167 displayed_warning = true;
6168 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6169 }
6170 usleep_range(1000, 2000);
6171 }
6173 if (displayed_warning)
6174 dev_warn(&ctrl_info->pci_dev->dev,
6175 "queued I/O drained after waiting for %u seconds\n",
6176 jiffies_to_msecs(jiffies - start_jiffies) / 1000);
6178 return 0;
6179 }
6181 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
6182 struct pqi_scsi_dev *device, u8 lun)
6183 {
6184 unsigned int i;
6185 unsigned int path;
6186 struct pqi_queue_group *queue_group;
6187 unsigned long flags;
6188 struct pqi_io_request *io_request;
6189 struct pqi_io_request *next;
6190 struct scsi_cmnd *scmd;
6191 struct pqi_scsi_dev *scsi_device;
6193 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
6194 queue_group = &ctrl_info->queue_groups[i];
6196 for (path = 0; path < 2; path++) {
6197 spin_lock_irqsave(
6198 &queue_group->submit_lock[path], flags);
6200 list_for_each_entry_safe(io_request, next,
6201 &queue_group->request_list[path],
6202 request_list_entry) {
6204 scmd = io_request->scmd;
6205 if (!scmd)
6206 continue;
6208 scsi_device = scmd->device->hostdata;
6210 list_del(&io_request->request_list_entry);
6211 if (scsi_device == device && (u8)scmd->device->lun == lun)
6212 set_host_byte(scmd, DID_RESET);
6213 else
6214 set_host_byte(scmd, DID_REQUEUE);
6215 pqi_free_io_request(io_request);
6216 scsi_dma_unmap(scmd);
6217 pqi_scsi_done(scmd);
6218 }
6220 spin_unlock_irqrestore(
6221 &queue_group->submit_lock[path], flags);
6222 }
6223 }
6224 }
6226 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10
6228 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
6229 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
6230 {
6231 int cmds_outstanding;
6232 unsigned long start_jiffies;
6233 unsigned long warning_timeout;
6234 unsigned long msecs_waiting;
6236 start_jiffies = jiffies;
6237 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;
6239 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) {
6240 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) {
6241 pqi_check_ctrl_health(ctrl_info);
6242 if (pqi_ctrl_offline(ctrl_info))
6243 return -ENXIO;
6244 }
6245 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies);
6246 if (msecs_waiting >= timeout_msecs) {
6247 dev_err(&ctrl_info->pci_dev->dev,
6248 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n",
6249 ctrl_info->scsi_host->host_no, device->bus, device->target,
6250 lun, msecs_waiting / 1000, cmds_outstanding);
6251 return -ETIMEDOUT;
6252 }
6253 if (time_after(jiffies, warning_timeout)) {
6254 dev_warn(&ctrl_info->pci_dev->dev,
6255 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n",
6256 ctrl_info->scsi_host->host_no, device->bus, device->target,
6257 lun, msecs_waiting / 1000, cmds_outstanding);
6258 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies;
6259 }
6260 usleep_range(1000, 2000);
6261 }
6263 return 0;
6264 }
6266 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
6267 void *context)
6268 {
6269 struct completion *waiting = context;
6271 complete(waiting);
6272 }
6274 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10
6276 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
6277 struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
6278 {
6279 int rc;
6280 unsigned int wait_secs;
6281 int cmds_outstanding;
6283 wait_secs = 0;
6285 while (1) {
6286 if (wait_for_completion_io_timeout(wait,
6287 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) {
6288 rc = 0;
6289 break;
6290 }
6292 pqi_check_ctrl_health(ctrl_info);
6293 if (pqi_ctrl_offline(ctrl_info)) {
6294 rc = -ENXIO;
6295 break;
6296 }
6298 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS;
6299 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]);
6300 dev_warn(&ctrl_info->pci_dev->dev,
6301 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n",
6302 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding);
6303 }
6305 return rc;
6306 }
6308 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30
6310 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6311 {
6312 int rc;
6313 struct pqi_io_request *io_request;
6314 DECLARE_COMPLETION_ONSTACK(wait);
6315 struct pqi_task_management_request *request;
6317 io_request = pqi_alloc_io_request(ctrl_info, NULL);
6318 io_request->io_complete_callback = pqi_lun_reset_complete;
6319 io_request->context = &wait;
6321 request = io_request->iu;
6322 memset(request, 0, sizeof(*request));
6324 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
6325 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
6326 &request->header.iu_length);
6327 put_unaligned_le16(io_request->index, &request->request_id);
6328 memcpy(request->lun_number, device->scsi3addr,
6329 sizeof(request->lun_number));
6330 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
6331 request->ml_device_lun_number = lun;
6332 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
6333 if (ctrl_info->tmf_iu_timeout_supported)
6334 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout);
6336 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
6337 io_request);
6339 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait);
6340 if (rc == 0)
6341 rc = io_request->status;
6343 pqi_free_io_request(io_request);
6345 return rc;
6346 }
6348 #define PQI_LUN_RESET_RETRIES 3
6349 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000)
6350 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000)
6351 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000)
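/*
 * Retry policy: the LUN reset is attempted up to PQI_LUN_RESET_RETRIES + 1
 * times, PQI_LUN_RESET_RETRY_INTERVAL_MSECS apart (-ENODEV and -ENXIO are
 * not retried). Pending I/O is then given 10 minutes to drain after a
 * successful reset, but only 2 minutes after a failed one.
 */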
6353 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6354 {
6355 int reset_rc;
6356 int wait_rc;
6357 unsigned int retries;
6358 unsigned long timeout_msecs;
6360 for (retries = 0;;) {
6361 reset_rc = pqi_lun_reset(ctrl_info, device, lun);
6362 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES)
6363 break;
6364 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
6365 }
6367 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS :
6368 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS;
6370 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs);
6371 if (wait_rc && reset_rc == 0)
6372 reset_rc = wait_rc;
6374 return reset_rc == 0 ? SUCCESS : FAILED;
6375 }
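/*
 * Device reset sequence: block new requests, wait for the controller to
 * quiesce, fail driver-queued I/O for the LUN, drain the inbound queues,
 * then issue the LUN reset (with retries) and wait for outstanding
 * commands to complete.
 */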
6377 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
6378 {
6379 int rc;
6381 pqi_ctrl_block_requests(ctrl_info);
6382 pqi_ctrl_wait_until_quiesced(ctrl_info);
6383 pqi_fail_io_queued_for_device(ctrl_info, device, lun);
6384 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
6385 pqi_device_reset_start(device, lun);
6386 pqi_ctrl_unblock_requests(ctrl_info);
6387 if (rc)
6388 rc = FAILED;
6389 else
6390 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun);
6391 pqi_device_reset_done(device, lun);
6393 return rc;
6394 }
6396 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
6397 {
6398 int rc;
6400 mutex_lock(&ctrl_info->lun_reset_mutex);
6402 dev_err(&ctrl_info->pci_dev->dev,
6403 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n",
6404 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode);
6406 pqi_check_ctrl_health(ctrl_info);
6407 if (pqi_ctrl_offline(ctrl_info))
6408 rc = FAILED;
6409 else
6410 rc = pqi_device_reset(ctrl_info, device, lun);
6412 dev_err(&ctrl_info->pci_dev->dev,
6413 "reset of scsi %d:%d:%d:%u: %s\n",
6414 ctrl_info->scsi_host->host_no, device->bus, device->target, lun,
6415 rc == SUCCESS ? "SUCCESS" : "FAILED");
6417 mutex_unlock(&ctrl_info->lun_reset_mutex);
6419 return rc;
6420 }
6422 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
6424 struct Scsi_Host *shost;
6425 struct pqi_ctrl_info *ctrl_info;
6426 struct pqi_scsi_dev *device;
6427 u8 scsi_opcode;
6429 shost = scmd->device->host;
6430 ctrl_info = shost_to_hba(shost);
6431 device = scmd->device->hostdata;
6432 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6434 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode);
6437 static void pqi_tmf_worker(struct work_struct *work)
6438 {
6439 struct pqi_tmf_work *tmf_work;
6440 struct scsi_cmnd *scmd;
6442 tmf_work = container_of(work, struct pqi_tmf_work, work_struct);
6443 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL);
6445 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode);
6446 }
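/*
 * There is no per-command abort here: the abort handler parks the command
 * on an on-stack completion and hands it to pqi_tmf_worker(), which
 * performs a device (LUN) reset on its behalf.
 */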
6448 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
6450 struct Scsi_Host *shost;
6451 struct pqi_ctrl_info *ctrl_info;
6452 struct pqi_scsi_dev *device;
6453 struct pqi_tmf_work *tmf_work;
6454 DECLARE_COMPLETION_ONSTACK(wait);
6456 shost = scmd->device->host;
6457 ctrl_info = shost_to_hba(shost);
6458 device = scmd->device->hostdata;
6460 dev_err(&ctrl_info->pci_dev->dev,
6461 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n",
6462 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6464 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) {
6465 dev_err(&ctrl_info->pci_dev->dev,
6466 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n",
6467 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6468 scmd->result = DID_RESET << 16;
6469 goto out;
6470 }
6472 tmf_work = &device->tmf_work[scmd->device->lun];
6474 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) {
6475 tmf_work->ctrl_info = ctrl_info;
6476 tmf_work->device = device;
6477 tmf_work->lun = (u8)scmd->device->lun;
6478 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff;
6479 schedule_work(&tmf_work->work_struct);
6480 }
6482 wait_for_completion(&wait);
6484 dev_err(&ctrl_info->pci_dev->dev,
6485 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n",
6486 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd);
6488 out:
6489 return SUCCESS;
6490 }
6493 static int pqi_slave_alloc(struct scsi_device *sdev)
6495 struct pqi_scsi_dev *device;
6496 unsigned long flags;
6497 struct pqi_ctrl_info *ctrl_info;
6498 struct scsi_target *starget;
6499 struct sas_rphy *rphy;
6501 ctrl_info = shost_to_hba(sdev->host);
6503 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6505 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
6506 starget = scsi_target(sdev);
6507 rphy = target_to_rphy(starget);
6508 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
6509 if (device) {
6510 if (device->target_lun_valid) {
6511 device->ignore_device = true;
6512 } else {
6513 device->target = sdev_id(sdev);
6514 device->lun = sdev->lun;
6515 device->target_lun_valid = true;
6516 }
6517 }
6518 } else {
6519 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
6520 sdev_id(sdev), sdev->lun);
6521 }
6523 if (device) {
6524 sdev->hostdata = device;
6525 device->sdev = sdev;
6526 if (device->queue_depth) {
6527 device->advertised_queue_depth = device->queue_depth;
6528 scsi_change_queue_depth(sdev,
6529 device->advertised_queue_depth);
6530 }
6531 if (pqi_is_logical_device(device)) {
6532 pqi_disable_write_same(sdev);
6533 } else {
6534 sdev->allow_restart = 1;
6535 if (device->device_type == SA_DEVICE_TYPE_NVME)
6536 pqi_disable_write_same(sdev);
6537 }
6538 }
6540 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6542 return 0;
6543 }
6545 static void pqi_map_queues(struct Scsi_Host *shost)
6547 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
6549 if (!ctrl_info->disable_managed_interrupts)
6550 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
6551 ctrl_info->pci_dev, 0);
6553 return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
6554 }
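/*
 * With managed interrupts enabled, blk-mq hardware queues follow the MSI-X
 * vector affinity of the PCI device; otherwise the default software queue
 * mapping is used.
 */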
6556 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
6558 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER;
6561 static int pqi_slave_configure(struct scsi_device *sdev)
6562 {
6563 int rc = 0;
6564 struct pqi_scsi_dev *device;
6566 device = sdev->hostdata;
6567 device->devtype = sdev->type;
6569 if (pqi_is_tape_changer_device(device) && device->ignore_device) {
6570 rc = -ENXIO;
6571 device->ignore_device = false;
6572 }
6574 return rc;
6575 }
6577 static void pqi_slave_destroy(struct scsi_device *sdev)
6579 struct pqi_ctrl_info *ctrl_info;
6580 struct pqi_scsi_dev *device;
6581 int mutex_acquired;
6582 unsigned long flags;
6584 ctrl_info = shost_to_hba(sdev->host);
6586 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex);
6587 if (!mutex_acquired)
6588 return;
6590 device = sdev->hostdata;
6591 if (!device) {
6592 mutex_unlock(&ctrl_info->scan_mutex);
6593 return;
6594 }
6596 device->lun_count--;
6597 if (device->lun_count > 0) {
6598 mutex_unlock(&ctrl_info->scan_mutex);
6599 return;
6600 }
6602 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
6603 list_del(&device->scsi_device_list_entry);
6604 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
6606 mutex_unlock(&ctrl_info->scan_mutex);
6608 pqi_dev_info(ctrl_info, "removed", device);
6609 pqi_free_device(device);
6612 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6614 struct pci_dev *pci_dev;
6615 u32 subsystem_vendor;
6616 u32 subsystem_device;
6617 cciss_pci_info_struct pci_info;
6619 if (!arg)
6620 return -EINVAL;
6622 pci_dev = ctrl_info->pci_dev;
6624 pci_info.domain = pci_domain_nr(pci_dev->bus);
6625 pci_info.bus = pci_dev->bus->number;
6626 pci_info.dev_fn = pci_dev->devfn;
6627 subsystem_vendor = pci_dev->subsystem_vendor;
6628 subsystem_device = pci_dev->subsystem_device;
6629 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
6631 if (copy_to_user(arg, &pci_info, sizeof(pci_info)))
6632 return -EFAULT;
6634 return 0;
6635 }
6637 static int pqi_getdrivver_ioctl(void __user *arg)
6638 {
6639 u32 version;
6641 if (!arg)
6642 return -EINVAL;
6644 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
6645 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
6647 if (copy_to_user(arg, &version, sizeof(version)))
6648 return -EFAULT;
6650 return 0;
6651 }
6653 struct ciss_error_info {
6654 u8 scsi_status;
6655 int command_status;
6656 size_t sense_data_length;
6657 };
6659 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
6660 struct ciss_error_info *ciss_error_info)
6662 int ciss_cmd_status;
6663 size_t sense_data_length;
6665 switch (pqi_error_info->data_out_result) {
6666 case PQI_DATA_IN_OUT_GOOD:
6667 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
6668 break;
6669 case PQI_DATA_IN_OUT_UNDERFLOW:
6670 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
6671 break;
6672 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
6673 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
6674 break;
6675 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
6676 case PQI_DATA_IN_OUT_BUFFER_ERROR:
6677 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
6678 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
6679 case PQI_DATA_IN_OUT_ERROR:
6680 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
6681 break;
6682 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
6683 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
6684 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
6685 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
6686 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
6687 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
6688 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
6689 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
6690 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
6691 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
6692 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
6693 break;
6694 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
6695 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
6696 break;
6697 case PQI_DATA_IN_OUT_ABORTED:
6698 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
6699 break;
6700 case PQI_DATA_IN_OUT_TIMEOUT:
6701 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
6702 break;
6703 default:
6704 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
6705 break;
6706 }
6708 sense_data_length =
6709 get_unaligned_le16(&pqi_error_info->sense_data_length);
6710 if (sense_data_length == 0)
6711 sense_data_length =
6712 get_unaligned_le16(&pqi_error_info->response_data_length);
6713 if (sense_data_length)
6714 if (sense_data_length > sizeof(pqi_error_info->data))
6715 sense_data_length = sizeof(pqi_error_info->data);
6717 ciss_error_info->scsi_status = pqi_error_info->status;
6718 ciss_error_info->command_status = ciss_cmd_status;
6719 ciss_error_info->sense_data_length = sense_data_length;
6720 }
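/*
 * Translate a PQI error descriptor into the legacy CISS error layout
 * expected by the cciss passthrough ioctl; the data-out result selects the
 * CISS command status, and the reported sense length is clamped to the
 * PQI sense buffer.
 */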
6722 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
6723 {
6724 int rc;
6725 char *kernel_buffer = NULL;
6726 u16 iu_length;
6727 size_t sense_data_length;
6728 IOCTL_Command_struct iocommand;
6729 struct pqi_raid_path_request request;
6730 struct pqi_raid_error_info pqi_error_info;
6731 struct ciss_error_info ciss_error_info;
6733 if (pqi_ctrl_offline(ctrl_info))
6734 return -ENXIO;
6735 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info))
6736 return -EBUSY;
6737 if (!arg)
6738 return -EINVAL;
6739 if (!capable(CAP_SYS_RAWIO))
6740 return -EPERM;
6741 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
6742 return -EFAULT;
6743 if (iocommand.buf_size < 1 &&
6744 iocommand.Request.Type.Direction != XFER_NONE)
6745 return -EINVAL;
6746 if (iocommand.Request.CDBLen > sizeof(request.cdb))
6747 return -EINVAL;
6748 if (iocommand.Request.Type.Type != TYPE_CMD)
6749 return -EINVAL;
6751 switch (iocommand.Request.Type.Direction) {
6752 case XFER_NONE:
6753 case XFER_WRITE:
6754 case XFER_READ:
6755 case XFER_READ | XFER_WRITE:
6756 break;
6757 default:
6758 return -EINVAL;
6759 }
6761 if (iocommand.buf_size > 0) {
6762 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
6763 if (!kernel_buffer)
6764 return -ENOMEM;
6765 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6766 if (copy_from_user(kernel_buffer, iocommand.buf,
6767 iocommand.buf_size)) {
6768 rc = -EFAULT;
6769 goto out;
6770 }
6771 } else {
6772 memset(kernel_buffer, 0, iocommand.buf_size);
6773 }
6774 }
6776 memset(&request, 0, sizeof(request));
6778 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
6779 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
6780 PQI_REQUEST_HEADER_LENGTH;
6781 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
6782 sizeof(request.lun_number));
6783 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
6784 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
6786 switch (iocommand.Request.Type.Direction) {
6787 case XFER_NONE:
6788 request.data_direction = SOP_NO_DIRECTION_FLAG;
6789 break;
6790 case XFER_WRITE:
6791 request.data_direction = SOP_WRITE_FLAG;
6792 break;
6793 case XFER_READ:
6794 request.data_direction = SOP_READ_FLAG;
6795 break;
6796 case XFER_READ | XFER_WRITE:
6797 request.data_direction = SOP_BIDIRECTIONAL;
6798 break;
6799 }
6801 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
6803 if (iocommand.buf_size > 0) {
6804 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
6806 rc = pqi_map_single(ctrl_info->pci_dev,
6807 &request.sg_descriptors[0], kernel_buffer,
6808 iocommand.buf_size, DMA_BIDIRECTIONAL);
6809 if (rc)
6810 goto out;
6812 iu_length += sizeof(request.sg_descriptors[0]);
6813 }
6815 put_unaligned_le16(iu_length, &request.header.iu_length);
6817 if (ctrl_info->raid_iu_timeout_supported)
6818 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);
6820 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
6821 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info);
6823 if (iocommand.buf_size > 0)
6824 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
6825 DMA_BIDIRECTIONAL);
6827 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
6829 if (rc == 0) {
6830 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
6831 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
6832 iocommand.error_info.CommandStatus =
6833 ciss_error_info.command_status;
6834 sense_data_length = ciss_error_info.sense_data_length;
6835 if (sense_data_length) {
6836 if (sense_data_length >
6837 sizeof(iocommand.error_info.SenseInfo))
6838 sense_data_length =
6839 sizeof(iocommand.error_info.SenseInfo);
6840 memcpy(iocommand.error_info.SenseInfo,
6841 pqi_error_info.data, sense_data_length);
6842 iocommand.error_info.SenseLen = sense_data_length;
6843 }
6844 }
6846 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
6847 rc = -EFAULT;
6848 goto out;
6849 }
6851 if (rc == 0 && iocommand.buf_size > 0 &&
6852 (iocommand.Request.Type.Direction & XFER_READ)) {
6853 if (copy_to_user(iocommand.buf, kernel_buffer,
6854 iocommand.buf_size)) {
6855 rc = -EFAULT;
6856 }
6857 }
6859 out:
6860 kfree(kernel_buffer);
6862 return rc;
6863 }
6865 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
6866 void __user *arg)
6867 {
6868 int rc;
6869 struct pqi_ctrl_info *ctrl_info;
6871 ctrl_info = shost_to_hba(sdev->host);
6873 switch (cmd) {
6874 case CCISS_DEREGDISK:
6875 case CCISS_REGNEWDISK:
6876 case CCISS_REGNEWD:
6877 rc = pqi_scan_scsi_devices(ctrl_info);
6878 break;
6879 case CCISS_GETPCIINFO:
6880 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
6881 break;
6882 case CCISS_GETDRIVVER:
6883 rc = pqi_getdrivver_ioctl(arg);
6884 break;
6885 case CCISS_PASSTHRU:
6886 rc = pqi_passthru_ioctl(ctrl_info, arg);
6887 break;
6888 default:
6889 rc = -EINVAL;
6890 break;
6891 }
6893 return rc;
6894 }
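/*
 * Example (user space, hypothetical device node, error handling omitted):
 *
 *   int fd = open("/dev/sg0", O_RDWR);
 *   DriverVer_type version;
 *   ioctl(fd, CCISS_GETDRIVVER, &version);
 *
 * The returned value decodes as
 * (major << 28) | (minor << 24) | (release << 16) | revision.
 */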
6896 static ssize_t pqi_firmware_version_show(struct device *dev,
6897 struct device_attribute *attr, char *buffer)
6899 struct Scsi_Host *shost;
6900 struct pqi_ctrl_info *ctrl_info;
6902 shost = class_to_shost(dev);
6903 ctrl_info = shost_to_hba(shost);
6905 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
6908 static ssize_t pqi_serial_number_show(struct device *dev,
6909 struct device_attribute *attr, char *buffer)
6911 struct Scsi_Host *shost;
6912 struct pqi_ctrl_info *ctrl_info;
6914 shost = class_to_shost(dev);
6915 ctrl_info = shost_to_hba(shost);
6917 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
6920 static ssize_t pqi_model_show(struct device *dev,
6921 struct device_attribute *attr, char *buffer)
6923 struct Scsi_Host *shost;
6924 struct pqi_ctrl_info *ctrl_info;
6926 shost = class_to_shost(dev);
6927 ctrl_info = shost_to_hba(shost);
6929 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
6932 static ssize_t pqi_vendor_show(struct device *dev,
6933 struct device_attribute *attr, char *buffer)
6935 struct Scsi_Host *shost;
6936 struct pqi_ctrl_info *ctrl_info;
6938 shost = class_to_shost(dev);
6939 ctrl_info = shost_to_hba(shost);
6941 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
6944 static ssize_t pqi_host_rescan_store(struct device *dev,
6945 struct device_attribute *attr, const char *buffer, size_t count)
6946 {
6947 struct Scsi_Host *shost = class_to_shost(dev);
6949 pqi_scan_start(shost);
6951 return count;
6952 }
6954 static ssize_t pqi_lockup_action_show(struct device *dev,
6955 struct device_attribute *attr, char *buffer)
6956 {
6957 int count = 0;
6958 unsigned int i;
6960 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6961 if (pqi_lockup_actions[i].action == pqi_lockup_action)
6962 count += scnprintf(buffer + count, PAGE_SIZE - count,
6963 "[%s] ", pqi_lockup_actions[i].name);
6964 else
6965 count += scnprintf(buffer + count, PAGE_SIZE - count,
6966 "%s ", pqi_lockup_actions[i].name);
6967 }
6969 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");
6971 return count;
6972 }
6974 static ssize_t pqi_lockup_action_store(struct device *dev,
6975 struct device_attribute *attr, const char *buffer, size_t count)
6976 {
6977 unsigned int i;
6978 char *action_name;
6979 char action_name_buffer[32];
6981 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer));
6982 action_name = strstrip(action_name_buffer);
6984 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
6985 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
6986 pqi_lockup_action = pqi_lockup_actions[i].action;
6987 return count;
6988 }
6989 }
6991 return -EINVAL;
6992 }
6994 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
6995 struct device_attribute *attr, char *buffer)
6997 struct Scsi_Host *shost = class_to_shost(dev);
6998 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7000 return scnprintf(buffer, 10, "%x\n",
7001 ctrl_info->enable_stream_detection);
7004 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
7005 struct device_attribute *attr, const char *buffer, size_t count)
7007 struct Scsi_Host *shost = class_to_shost(dev);
7008 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7009 u8 set_stream_detection = 0;
7011 if (kstrtou8(buffer, 0, &set_stream_detection))
7012 return -EINVAL;
7014 if (set_stream_detection > 0)
7015 set_stream_detection = 1;
7017 ctrl_info->enable_stream_detection = set_stream_detection;
7019 return count;
7020 }
7022 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
7023 struct device_attribute *attr, char *buffer)
7025 struct Scsi_Host *shost = class_to_shost(dev);
7026 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7028 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
7031 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
7032 struct device_attribute *attr, const char *buffer, size_t count)
7034 struct Scsi_Host *shost = class_to_shost(dev);
7035 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7036 u8 set_r5_writes = 0;
7038 if (kstrtou8(buffer, 0, &set_r5_writes))
7039 return -EINVAL;
7041 if (set_r5_writes > 0)
7042 set_r5_writes = 1;
7044 ctrl_info->enable_r5_writes = set_r5_writes;
7046 return count;
7047 }
7049 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
7050 struct device_attribute *attr, char *buffer)
7052 struct Scsi_Host *shost = class_to_shost(dev);
7053 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7055 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
7058 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
7059 struct device_attribute *attr, const char *buffer, size_t count)
7061 struct Scsi_Host *shost = class_to_shost(dev);
7062 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
7063 u8 set_r6_writes = 0;
7065 if (kstrtou8(buffer, 0, &set_r6_writes))
7066 return -EINVAL;
7068 if (set_r6_writes > 0)
7069 set_r6_writes = 1;
7071 ctrl_info->enable_r6_writes = set_r6_writes;
7073 return count;
7074 }
7076 static DEVICE_STRING_ATTR_RO(driver_version, 0444,
7077 DRIVER_VERSION BUILD_TIMESTAMP);
7078 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
7079 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
7080 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
7081 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
7082 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
7083 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
7084 pqi_lockup_action_store);
7085 static DEVICE_ATTR(enable_stream_detection, 0644,
7086 pqi_host_enable_stream_detection_show,
7087 pqi_host_enable_stream_detection_store);
7088 static DEVICE_ATTR(enable_r5_writes, 0644,
7089 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
7090 static DEVICE_ATTR(enable_r6_writes, 0644,
7091 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);
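/*
 * These host attributes appear under /sys/class/scsi_host/hostN/
 * (N is the hypothetical host number), e.g.:
 *
 *   cat /sys/class/scsi_host/host0/firmware_version
 *   echo none > /sys/class/scsi_host/host0/lockup_action
 *   echo 1 > /sys/class/scsi_host/host0/enable_r5_writes
 */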
7093 static struct attribute *pqi_shost_attrs[] = {
7094 &dev_attr_driver_version.attr.attr,
7095 &dev_attr_firmware_version.attr,
7096 &dev_attr_model.attr,
7097 &dev_attr_serial_number.attr,
7098 &dev_attr_vendor.attr,
7099 &dev_attr_rescan.attr,
7100 &dev_attr_lockup_action.attr,
7101 &dev_attr_enable_stream_detection.attr,
7102 &dev_attr_enable_r5_writes.attr,
7103 &dev_attr_enable_r6_writes.attr,
7104 NULL
7105 };
7107 ATTRIBUTE_GROUPS(pqi_shost);
7109 static ssize_t pqi_unique_id_show(struct device *dev,
7110 struct device_attribute *attr, char *buffer)
7112 struct pqi_ctrl_info *ctrl_info;
7113 struct scsi_device *sdev;
7114 struct pqi_scsi_dev *device;
7115 unsigned long flags;
7116 u8 unique_id[16];
7118 sdev = to_scsi_device(dev);
7119 ctrl_info = shost_to_hba(sdev->host);
7121 if (pqi_ctrl_offline(ctrl_info))
7122 return -ENODEV;
7124 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7126 device = sdev->hostdata;
7127 if (!device) {
7128 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7129 return -ENODEV;
7130 }
7132 if (device->is_physical_device)
7133 memcpy(unique_id, device->wwid, sizeof(device->wwid));
7134 else
7135 memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
7137 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7139 return scnprintf(buffer, PAGE_SIZE,
7140 "%02X%02X%02X%02X%02X%02X%02X%02X"
7141 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
7142 unique_id[0], unique_id[1], unique_id[2], unique_id[3],
7143 unique_id[4], unique_id[5], unique_id[6], unique_id[7],
7144 unique_id[8], unique_id[9], unique_id[10], unique_id[11],
7145 unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
7148 static ssize_t pqi_lunid_show(struct device *dev,
7149 struct device_attribute *attr, char *buffer)
7151 struct pqi_ctrl_info *ctrl_info;
7152 struct scsi_device *sdev;
7153 struct pqi_scsi_dev *device;
7154 unsigned long flags;
7155 u8 lunid[8];
7157 sdev = to_scsi_device(dev);
7158 ctrl_info = shost_to_hba(sdev->host);
7160 if (pqi_ctrl_offline(ctrl_info))
7161 return -ENODEV;
7163 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7165 device = sdev->hostdata;
7166 if (!device) {
7167 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7168 return -ENODEV;
7169 }
7171 memcpy(lunid, device->scsi3addr, sizeof(lunid));
7173 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7175 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
7180 static ssize_t pqi_path_info_show(struct device *dev,
7181 struct device_attribute *attr, char *buf)
7183 struct pqi_ctrl_info *ctrl_info;
7184 struct scsi_device *sdev;
7185 struct pqi_scsi_dev *device;
7186 unsigned long flags;
7187 int i;
7188 int output_len = 0;
7189 u8 box;
7190 u8 *active;
7191 u8 path_map_index;
7193 u8 phys_connector[2];
7195 sdev = to_scsi_device(dev);
7196 ctrl_info = shost_to_hba(sdev->host);
7198 if (pqi_ctrl_offline(ctrl_info))
7199 return -ENODEV;
7201 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7203 device = sdev->hostdata;
7204 if (!device) {
7205 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7206 return -ENODEV;
7207 }
7210 for (i = 0; i < MAX_PATHS; i++) {
7211 path_map_index = 1 << i;
7212 if (i == device->active_path_index)
7213 active = "Active";
7214 else if (device->path_map & path_map_index)
7215 active = "Inactive";
7216 else
7217 continue;
7219 output_len += scnprintf(buf + output_len,
7220 PAGE_SIZE - output_len,
7221 "[%d:%d:%d:%d] %20.20s ",
7222 ctrl_info->scsi_host->host_no,
7223 device->bus, device->target,
7224 device->lun,
7225 scsi_device_type(device->devtype));
7227 if (device->devtype == TYPE_RAID ||
7228 pqi_is_logical_device(device))
7229 goto end_buffer;
7231 memcpy(&phys_connector, &device->phys_connector[i],
7232 sizeof(phys_connector));
7233 if (phys_connector[0] < '0')
7234 phys_connector[0] = '0';
7235 if (phys_connector[1] < '0')
7236 phys_connector[1] = '0';
7238 output_len += scnprintf(buf + output_len,
7239 PAGE_SIZE - output_len,
7240 "PORT: %.2s ", phys_connector);
7242 box = device->box[i];
7243 if (box != 0 && box != 0xFF)
7244 output_len += scnprintf(buf + output_len,
7245 PAGE_SIZE - output_len,
7246 "BOX: %hhu ", box);
7248 if ((device->devtype == TYPE_DISK ||
7249 device->devtype == TYPE_ZBC) &&
7250 pqi_expose_device(device))
7251 output_len += scnprintf(buf + output_len,
7252 PAGE_SIZE - output_len,
7253 "BAY: %hhu ", device->bay);
7255 end_buffer:
7256 output_len += scnprintf(buf + output_len,
7257 PAGE_SIZE - output_len,
7258 "\n");
7259 }
7261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7263 return output_len;
7264 }
7266 static ssize_t pqi_sas_address_show(struct device *dev,
7267 struct device_attribute *attr, char *buffer)
7269 struct pqi_ctrl_info *ctrl_info;
7270 struct scsi_device *sdev;
7271 struct pqi_scsi_dev *device;
7272 unsigned long flags;
7273 u64 sas_address;
7275 sdev = to_scsi_device(dev);
7276 ctrl_info = shost_to_hba(sdev->host);
7278 if (pqi_ctrl_offline(ctrl_info))
7279 return -ENODEV;
7281 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7283 device = sdev->hostdata;
7284 if (!device || !pqi_is_device_with_sas_address(device)) {
7285 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7286 return -ENODEV;
7287 }
7289 sas_address = device->sas_address;
7291 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7293 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
7296 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
7297 struct device_attribute *attr, char *buffer)
7299 struct pqi_ctrl_info *ctrl_info;
7300 struct scsi_device *sdev;
7301 struct pqi_scsi_dev *device;
7302 unsigned long flags;
7304 sdev = to_scsi_device(dev);
7305 ctrl_info = shost_to_hba(sdev->host);
7307 if (pqi_ctrl_offline(ctrl_info))
7308 return -ENODEV;
7310 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7312 device = sdev->hostdata;
7313 if (!device) {
7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7315 return -ENODEV;
7316 }
7318 buffer[0] = device->raid_bypass_enabled ? '1' : '0';
7319 buffer[1] = '\n';
7320 buffer[2] = '\0';
7322 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7324 return 2;
7325 }
7327 static ssize_t pqi_raid_level_show(struct device *dev,
7328 struct device_attribute *attr, char *buffer)
7330 struct pqi_ctrl_info *ctrl_info;
7331 struct scsi_device *sdev;
7332 struct pqi_scsi_dev *device;
7333 unsigned long flags;
7334 char *raid_level;
7336 sdev = to_scsi_device(dev);
7337 ctrl_info = shost_to_hba(sdev->host);
7339 if (pqi_ctrl_offline(ctrl_info))
7340 return -ENODEV;
7342 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7344 device = sdev->hostdata;
7345 if (!device) {
7346 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7347 return -ENODEV;
7348 }
7350 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK)
7351 raid_level = pqi_raid_level_to_string(device->raid_level);
7352 else
7353 raid_level = "N/A";
7355 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7357 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
7360 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
7361 struct device_attribute *attr, char *buffer)
7363 struct pqi_ctrl_info *ctrl_info;
7364 struct scsi_device *sdev;
7365 struct pqi_scsi_dev *device;
7366 unsigned long flags;
7367 u64 raid_bypass_cnt;
7368 int cpu;
7370 sdev = to_scsi_device(dev);
7371 ctrl_info = shost_to_hba(sdev->host);
7373 if (pqi_ctrl_offline(ctrl_info))
7374 return -ENODEV;
7376 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7378 device = sdev->hostdata;
7379 if (!device) {
7380 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7381 return -ENODEV;
7382 }
7384 raid_bypass_cnt = 0;
7386 if (device->raid_io_stats) {
7387 for_each_online_cpu(cpu) {
7388 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt;
7389 }
7390 }
7392 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7394 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt);
7395 }
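/*
 * The bypass counter is kept per CPU so the I/O fast path avoids a shared
 * atomic; the sysfs read sums the online CPUs' values under the device
 * list lock, so the result is a point-in-time snapshot.
 */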
7397 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
7398 struct device_attribute *attr, char *buf)
7400 struct pqi_ctrl_info *ctrl_info;
7401 struct scsi_device *sdev;
7402 struct pqi_scsi_dev *device;
7403 unsigned long flags;
7404 int output_len = 0;
7406 sdev = to_scsi_device(dev);
7407 ctrl_info = shost_to_hba(sdev->host);
7409 if (pqi_ctrl_offline(ctrl_info))
7410 return -ENODEV;
7412 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7414 device = sdev->hostdata;
7415 if (!device) {
7416 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7417 return -ENODEV;
7418 }
7420 output_len = snprintf(buf, PAGE_SIZE, "%d\n",
7421 device->ncq_prio_enable);
7422 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7424 return output_len;
7425 }
7427 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
7428 struct device_attribute *attr,
7429 const char *buf, size_t count)
7431 struct pqi_ctrl_info *ctrl_info;
7432 struct scsi_device *sdev;
7433 struct pqi_scsi_dev *device;
7434 unsigned long flags;
7435 u8 ncq_prio_enable = 0;
7437 if (kstrtou8(buf, 0, &ncq_prio_enable))
7438 return -EINVAL;
7440 sdev = to_scsi_device(dev);
7441 ctrl_info = shost_to_hba(sdev->host);
7443 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7445 device = sdev->hostdata;
7446 if (!device) {
7448 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7449 return -ENODEV;
7450 }
7452 if (!device->ncq_prio_support) {
7453 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7454 return -EINVAL;
7455 }
7457 device->ncq_prio_enable = ncq_prio_enable;
7459 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7461 return strlen(buf);
7462 }
7464 static ssize_t pqi_numa_node_show(struct device *dev,
7465 struct device_attribute *attr, char *buffer)
7467 struct scsi_device *sdev;
7468 struct pqi_ctrl_info *ctrl_info;
7470 sdev = to_scsi_device(dev);
7471 ctrl_info = shost_to_hba(sdev->host);
7473 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
7476 static ssize_t pqi_write_stream_cnt_show(struct device *dev,
7477 struct device_attribute *attr, char *buffer)
7479 struct pqi_ctrl_info *ctrl_info;
7480 struct scsi_device *sdev;
7481 struct pqi_scsi_dev *device;
7482 unsigned long flags;
7483 u64 write_stream_cnt;
7484 int cpu;
7486 sdev = to_scsi_device(dev);
7487 ctrl_info = shost_to_hba(sdev->host);
7489 if (pqi_ctrl_offline(ctrl_info))
7490 return -ENODEV;
7492 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
7494 device = sdev->hostdata;
7495 if (!device) {
7496 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7497 return -ENODEV;
7498 }
7500 write_stream_cnt = 0;
7502 if (device->raid_io_stats) {
7503 for_each_online_cpu(cpu) {
7504 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt;
7505 }
7506 }
7508 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
7510 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt);
7511 }
7513 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
7514 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
7515 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
7516 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
7517 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
7518 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
7519 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
7520 static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
7521 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
7522 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
7523 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL);
7525 static struct attribute *pqi_sdev_attrs[] = {
7526 &dev_attr_lunid.attr,
7527 &dev_attr_unique_id.attr,
7528 &dev_attr_path_info.attr,
7529 &dev_attr_sas_address.attr,
7530 &dev_attr_ssd_smart_path_enabled.attr,
7531 &dev_attr_raid_level.attr,
7532 &dev_attr_raid_bypass_cnt.attr,
7533 &dev_attr_sas_ncq_prio_enable.attr,
7534 &dev_attr_numa_node.attr,
7535 &dev_attr_write_stream_cnt.attr,
7536 NULL
7537 };
7539 ATTRIBUTE_GROUPS(pqi_sdev);
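/*
 * The per-device attributes above appear under each SCSI device's sysfs
 * directory, e.g. (hypothetical H:B:T:L address):
 *
 *   cat /sys/class/scsi_device/0:2:0:0/device/raid_level
 *   echo 1 > /sys/class/scsi_device/0:0:1:0/device/sas_ncq_prio_enable
 */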
7541 static const struct scsi_host_template pqi_driver_template = {
7542 .module = THIS_MODULE,
7543 .name = DRIVER_NAME_SHORT,
7544 .proc_name = DRIVER_NAME_SHORT,
7545 .queuecommand = pqi_scsi_queue_command,
7546 .scan_start = pqi_scan_start,
7547 .scan_finished = pqi_scan_finished,
7549 .eh_device_reset_handler = pqi_eh_device_reset_handler,
7550 .eh_abort_handler = pqi_eh_abort_handler,
7552 .slave_alloc = pqi_slave_alloc,
7553 .slave_configure = pqi_slave_configure,
7554 .slave_destroy = pqi_slave_destroy,
7555 .map_queues = pqi_map_queues,
7556 .sdev_groups = pqi_sdev_groups,
7557 .shost_groups = pqi_shost_groups,
7558 .cmd_size = sizeof(struct pqi_cmd_priv),
7559 };
7561 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
7564 struct Scsi_Host *shost;
7566 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
7567 if (!shost) {
7568 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n");
7569 return -ENOMEM;
7570 }
7573 shost->n_io_port = 0;
7574 shost->this_id = -1;
7575 shost->max_channel = PQI_MAX_BUS;
7576 shost->max_cmd_len = MAX_COMMAND_SIZE;
7577 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE;
7578 shost->max_id = ~0;
7579 shost->max_sectors = ctrl_info->max_sectors;
7580 shost->can_queue = ctrl_info->scsi_ml_can_queue;
7581 shost->cmd_per_lun = shost->can_queue;
7582 shost->sg_tablesize = ctrl_info->sg_tablesize;
7583 shost->transportt = pqi_sas_transport_template;
7584 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
7585 shost->unique_id = shost->irq;
7586 shost->nr_hw_queues = ctrl_info->num_queue_groups;
7587 shost->host_tagset = 1;
7588 shost->hostdata[0] = (unsigned long)ctrl_info;
7590 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
7591 if (rc) {
7592 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n");
7593 goto free_host;
7594 }
7596 rc = pqi_add_sas_host(shost, ctrl_info);
7597 if (rc) {
7598 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n");
7599 goto remove_host;
7600 }
7602 ctrl_info->scsi_host = shost;
7604 return 0;
7606 remove_host:
7607 scsi_remove_host(shost);
7608 free_host:
7609 scsi_host_put(shost);
7611 return rc;
7612 }
7614 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
7616 struct Scsi_Host *shost;
7618 pqi_delete_sas_host(ctrl_info);
7620 shost = ctrl_info->scsi_host;
7621 if (!shost)
7622 return;
7624 scsi_remove_host(shost);
7625 scsi_host_put(shost);
7628 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
7630 int rc = 0;
7631 struct pqi_device_registers __iomem *pqi_registers;
7632 unsigned long timeout;
7633 unsigned int timeout_msecs;
7634 union pqi_reset_register reset_reg;
7636 pqi_registers = ctrl_info->pqi_registers;
7637 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
7638 timeout = msecs_to_jiffies(timeout_msecs) + jiffies;
7640 while (1) {
7641 msleep(PQI_RESET_POLL_INTERVAL_MSECS);
7642 reset_reg.all_bits = readl(&pqi_registers->device_reset);
7643 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
7644 break;
7645 if (!sis_is_firmware_running(ctrl_info)) {
7646 rc = -ENXIO;
7647 break;
7648 }
7649 if (time_after(jiffies, timeout)) {
7650 rc = -ETIMEDOUT;
7651 break;
7652 }
7653 }
7655 return rc;
7656 }
7658 static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
7659 {
7660 int rc;
7661 union pqi_reset_register reset_reg;
7663 if (ctrl_info->pqi_reset_quiesce_supported) {
7664 rc = sis_pqi_reset_quiesce(ctrl_info);
7665 if (rc) {
7666 dev_err(&ctrl_info->pci_dev->dev,
7667 "PQI reset failed during quiesce with error %d\n", rc);
7668 return rc;
7669 }
7670 }
7672 reset_reg.all_bits = 0;
7673 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
7674 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
7676 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);
7678 rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
7679 if (rc)
7680 dev_err(&ctrl_info->pci_dev->dev,
7681 "PQI reset failed with error %d\n", rc);
7683 return rc;
7684 }
7686 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
7687 {
7688 int rc;
7689 struct bmic_sense_subsystem_info *sense_info;
7691 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
7692 if (!sense_info)
7693 return -ENOMEM;
7695 rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
7696 if (rc)
7697 goto out;
7699 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
7700 sizeof(sense_info->ctrl_serial_number));
7701 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';
7703 out:
7704 kfree(sense_info);
7706 return rc;
7707 }
7709 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
7710 {
7711 int rc;
7712 struct bmic_identify_controller *identify;
7714 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
7715 if (!identify)
7716 return -ENOMEM;
7718 rc = pqi_identify_controller(ctrl_info, identify);
7719 if (rc)
7720 goto out;
7722 if (get_unaligned_le32(&identify->extra_controller_flags) &
7723 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) {
7724 memcpy(ctrl_info->firmware_version,
7725 identify->firmware_version_long,
7726 sizeof(identify->firmware_version_long));
7727 } else {
7728 memcpy(ctrl_info->firmware_version,
7729 identify->firmware_version_short,
7730 sizeof(identify->firmware_version_short));
7731 ctrl_info->firmware_version
7732 [sizeof(identify->firmware_version_short)] = '\0';
7733 snprintf(ctrl_info->firmware_version +
7734 strlen(ctrl_info->firmware_version),
7735 sizeof(ctrl_info->firmware_version) -
7736 sizeof(identify->firmware_version_short),
7737 "-%u",
7738 get_unaligned_le16(&identify->firmware_build_number));
7739 }
7741 memcpy(ctrl_info->model, identify->product_id,
7742 sizeof(identify->product_id));
7743 ctrl_info->model[sizeof(identify->product_id)] = '\0';
7745 memcpy(ctrl_info->vendor, identify->vendor_id,
7746 sizeof(identify->vendor_id));
7747 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';
7749 dev_info(&ctrl_info->pci_dev->dev,
7750 "Firmware version: %s\n", ctrl_info->firmware_version);
7752 out:
7753 kfree(identify);
7755 return rc;
7756 }
7758 struct pqi_config_table_section_info {
7759 struct pqi_ctrl_info *ctrl_info;
7760 void *section;
7761 u32 section_offset;
7762 void __iomem *section_iomem_addr;
7763 };
7765 static inline bool pqi_is_firmware_feature_supported(
7766 struct pqi_config_table_firmware_features *firmware_features,
7767 unsigned int bit_position)
7769 unsigned int byte_index;
7771 byte_index = bit_position / BITS_PER_BYTE;
7773 if (byte_index >= le16_to_cpu(firmware_features->num_elements))
7774 return false;
7776 return firmware_features->features_supported[byte_index] &
7777 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7780 static inline bool pqi_is_firmware_feature_enabled(
7781 struct pqi_config_table_firmware_features *firmware_features,
7782 void __iomem *firmware_features_iomem_addr,
7783 unsigned int bit_position)
7785 unsigned int byte_index;
7786 u8 __iomem *features_enabled_iomem_addr;
7788 byte_index = (bit_position / BITS_PER_BYTE) +
7789 (le16_to_cpu(firmware_features->num_elements) * 2);
7791 features_enabled_iomem_addr = firmware_features_iomem_addr +
7792 offsetof(struct pqi_config_table_firmware_features,
7793 features_supported) + byte_index;
7795 return *((__force u8 *)features_enabled_iomem_addr) &
7796 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
7799 static inline void pqi_request_firmware_feature(
7800 struct pqi_config_table_firmware_features *firmware_features,
7801 unsigned int bit_position)
7803 unsigned int byte_index;
7805 byte_index = (bit_position / BITS_PER_BYTE) +
7806 le16_to_cpu(firmware_features->num_elements);
7808 firmware_features->features_supported[byte_index] |=
7809 (1 << (bit_position % BITS_PER_BYTE));
7810 }
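/*
 * Layout of the firmware features section, as implied by the index math in
 * the helpers above: num_elements is followed by three byte arrays of
 * num_elements each -- features supported (offset 0), features requested
 * by the host (offset num_elements), and features enabled (offset
 * 2 * num_elements) -- then two 16-bit max-known-feature fields
 * (firmware's, then the host's).
 */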
7812 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
7813 u16 first_section, u16 last_section)
7815 struct pqi_vendor_general_request request;
7817 memset(&request, 0, sizeof(request));
7819 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
7820 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
7821 &request.header.iu_length);
7822 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
7823 &request.function_code);
7824 put_unaligned_le16(first_section,
7825 &request.data.config_table_update.first_section);
7826 put_unaligned_le16(last_section,
7827 &request.data.config_table_update.last_section);
7829 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
7832 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
7833 struct pqi_config_table_firmware_features *firmware_features,
7834 void __iomem *firmware_features_iomem_addr)
7836 void *features_requested;
7837 void __iomem *features_requested_iomem_addr;
7838 void __iomem *host_max_known_feature_iomem_addr;
7840 features_requested = firmware_features->features_supported +
7841 le16_to_cpu(firmware_features->num_elements);
7843 features_requested_iomem_addr = firmware_features_iomem_addr +
7844 (features_requested - (void *)firmware_features);
7846 memcpy_toio(features_requested_iomem_addr, features_requested,
7847 le16_to_cpu(firmware_features->num_elements));
7849 if (pqi_is_firmware_feature_supported(firmware_features,
7850 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) {
7851 host_max_known_feature_iomem_addr =
7852 features_requested_iomem_addr +
7853 (le16_to_cpu(firmware_features->num_elements) * 2) +
7854 sizeof(__le16);
7855 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
7856 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
7859 return pqi_config_table_update(ctrl_info,
7860 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
7861 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
7862 }
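/*
 * Feature negotiation: copy the requested-feature bits out to the config
 * table, advertise the highest feature bit this driver knows about, then
 * ask the firmware to re-read the section via a vendor-general
 * CONFIG_TABLE_UPDATE request.
 */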
7864 struct pqi_firmware_feature {
7865 char *feature_name;
7866 unsigned int feature_bit;
7867 bool supported;
7868 bool enabled;
7869 void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
7870 struct pqi_firmware_feature *firmware_feature);
7871 };
7873 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
7874 struct pqi_firmware_feature *firmware_feature)
7876 if (!firmware_feature->supported) {
7877 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
7878 firmware_feature->feature_name);
7879 return;
7880 }
7882 if (firmware_feature->enabled) {
7883 dev_info(&ctrl_info->pci_dev->dev,
7884 "%s enabled\n", firmware_feature->feature_name);
7885 return;
7886 }
7888 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
7889 firmware_feature->feature_name);
7890 }
7892 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
7893 struct pqi_firmware_feature *firmware_feature)
7895 switch (firmware_feature->feature_bit) {
7896 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS:
7897 ctrl_info->enable_r1_writes = firmware_feature->enabled;
7898 break;
7899 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS:
7900 ctrl_info->enable_r5_writes = firmware_feature->enabled;
7901 break;
7902 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS:
7903 ctrl_info->enable_r6_writes = firmware_feature->enabled;
7904 break;
7905 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
7906 ctrl_info->soft_reset_handshake_supported =
7907 firmware_feature->enabled &&
7908 pqi_read_soft_reset_status(ctrl_info);
7909 break;
7910 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
7911 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled;
7912 break;
7913 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
7914 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled;
7915 break;
7916 case PQI_FIRMWARE_FEATURE_FW_TRIAGE:
7917 ctrl_info->firmware_triage_supported = firmware_feature->enabled;
7918 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled);
7919 break;
7920 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5:
7921 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled;
7922 break;
7923 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT:
7924 ctrl_info->multi_lun_device_supported = firmware_feature->enabled;
7925 break;
7926 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING:
7927 ctrl_info->ctrl_logging_supported = firmware_feature->enabled;
7928 break;
7929 }
7931 pqi_firmware_feature_status(ctrl_info, firmware_feature);
7932 }
7934 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
7935 struct pqi_firmware_feature *firmware_feature)
7937 if (firmware_feature->feature_status)
7938 firmware_feature->feature_status(ctrl_info, firmware_feature);
7941 static DEFINE_MUTEX(pqi_firmware_features_mutex);
7943 static struct pqi_firmware_feature pqi_firmware_features[] = {
7945 .feature_name = "Online Firmware Activation",
7946 .feature_bit = PQI_FIRMWARE_FEATURE_OFA,
7947 .feature_status = pqi_firmware_feature_status,
7950 .feature_name = "Serial Management Protocol",
7951 .feature_bit = PQI_FIRMWARE_FEATURE_SMP,
7952 .feature_status = pqi_firmware_feature_status,
7955 .feature_name = "Maximum Known Feature",
7956 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE,
7957 .feature_status = pqi_firmware_feature_status,
7960 .feature_name = "RAID 0 Read Bypass",
7961 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS,
7962 .feature_status = pqi_firmware_feature_status,
7965 .feature_name = "RAID 1 Read Bypass",
7966 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS,
7967 .feature_status = pqi_firmware_feature_status,
7970 .feature_name = "RAID 5 Read Bypass",
7971 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS,
7972 .feature_status = pqi_firmware_feature_status,
7975 .feature_name = "RAID 6 Read Bypass",
7976 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS,
7977 .feature_status = pqi_firmware_feature_status,
7980 .feature_name = "RAID 0 Write Bypass",
7981 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS,
7982 .feature_status = pqi_firmware_feature_status,
7985 .feature_name = "RAID 1 Write Bypass",
7986 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS,
7987 .feature_status = pqi_ctrl_update_feature_flags,
7990 .feature_name = "RAID 5 Write Bypass",
7991 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS,
7992 .feature_status = pqi_ctrl_update_feature_flags,
7995 .feature_name = "RAID 6 Write Bypass",
7996 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS,
7997 .feature_status = pqi_ctrl_update_feature_flags,
8000 .feature_name = "New Soft Reset Handshake",
8001 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
8002 .feature_status = pqi_ctrl_update_feature_flags,
8005 .feature_name = "RAID IU Timeout",
8006 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
8007 .feature_status = pqi_ctrl_update_feature_flags,
8010 .feature_name = "TMF IU Timeout",
8011 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
8012 .feature_status = pqi_ctrl_update_feature_flags,
8015 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe",
8016 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME,
8017 .feature_status = pqi_firmware_feature_status,
8020 .feature_name = "Firmware Triage",
8021 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE,
8022 .feature_status = pqi_ctrl_update_feature_flags,
8025 .feature_name = "RPL Extended Formats 4 and 5",
8026 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5,
8027 .feature_status = pqi_ctrl_update_feature_flags,
8030 .feature_name = "Multi-LUN Target",
8031 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT,
8032 .feature_status = pqi_ctrl_update_feature_flags,
8035 .feature_name = "Controller Data Logging",
8036 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING,
8037 .feature_status = pqi_ctrl_update_feature_flags,
8041 static void pqi_process_firmware_features(
8042 struct pqi_config_table_section_info *section_info)
8044 int rc;
8045 struct pqi_ctrl_info *ctrl_info;
8046 struct pqi_config_table_firmware_features *firmware_features;
8047 void __iomem *firmware_features_iomem_addr;
8048 unsigned int i;
8049 unsigned int num_features_supported;
8051 ctrl_info = section_info->ctrl_info;
8052 firmware_features = section_info->section;
8053 firmware_features_iomem_addr = section_info->section_iomem_addr;
8055 for (i = 0, num_features_supported = 0;
8056 i < ARRAY_SIZE(pqi_firmware_features); i++) {
8057 if (pqi_is_firmware_feature_supported(firmware_features,
8058 pqi_firmware_features[i].feature_bit)) {
8059 pqi_firmware_features[i].supported = true;
8060 num_features_supported++;
8061 } else {
8062 pqi_firmware_feature_update(ctrl_info,
8063 &pqi_firmware_features[i]);
8064 }
8065 }
8067 if (num_features_supported == 0)
8068 return;
8070 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8071 if (!pqi_firmware_features[i].supported)
8072 continue;
8073 pqi_request_firmware_feature(firmware_features,
8074 pqi_firmware_features[i].feature_bit);
8077 rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
8078 firmware_features_iomem_addr);
8079 if (rc) {
8080 dev_err(&ctrl_info->pci_dev->dev,
8081 "failed to enable firmware features in PQI configuration table\n");
8082 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8083 if (!pqi_firmware_features[i].supported)
8084 continue;
8085 pqi_firmware_feature_update(ctrl_info,
8086 &pqi_firmware_features[i]);
8087 }
8088 return;
8089 }
8091 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8092 if (!pqi_firmware_features[i].supported)
8093 continue;
8094 if (pqi_is_firmware_feature_enabled(firmware_features,
8095 firmware_features_iomem_addr,
8096 pqi_firmware_features[i].feature_bit)) {
8097 pqi_firmware_features[i].enabled = true;
8098 }
8099 pqi_firmware_feature_update(ctrl_info,
8100 &pqi_firmware_features[i]);
8101 }
8102 }
8104 static void pqi_init_firmware_features(void)
8105 {
8106 unsigned int i;
8108 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
8109 pqi_firmware_features[i].supported = false;
8110 pqi_firmware_features[i].enabled = false;
8111 }
8112 }
8114 static void pqi_process_firmware_features_section(
8115 struct pqi_config_table_section_info *section_info)
8117 mutex_lock(&pqi_firmware_features_mutex);
8118 pqi_init_firmware_features();
8119 pqi_process_firmware_features(section_info);
8120 mutex_unlock(&pqi_firmware_features_mutex);
8123 /*
8124 * Reset all controller settings that can be initialized during the processing
8125 * of the PQI Configuration Table.
8126 */
8128 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
8130 ctrl_info->heartbeat_counter = NULL;
8131 ctrl_info->soft_reset_status = NULL;
8132 ctrl_info->soft_reset_handshake_supported = false;
8133 ctrl_info->enable_r1_writes = false;
8134 ctrl_info->enable_r5_writes = false;
8135 ctrl_info->enable_r6_writes = false;
8136 ctrl_info->raid_iu_timeout_supported = false;
8137 ctrl_info->tmf_iu_timeout_supported = false;
8138 ctrl_info->firmware_triage_supported = false;
8139 ctrl_info->rpl_extended_format_4_5_supported = false;
8140 ctrl_info->multi_lun_device_supported = false;
8141 ctrl_info->ctrl_logging_supported = false;
8144 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
8145 {
8146 u32 table_length;
8147 u32 section_offset;
8148 bool firmware_feature_section_present;
8149 void __iomem *table_iomem_addr;
8150 struct pqi_config_table *config_table;
8151 struct pqi_config_table_section_header *section;
8152 struct pqi_config_table_section_info section_info;
8153 struct pqi_config_table_section_info feature_section_info = {0};
8155 table_length = ctrl_info->config_table_length;
8156 if (table_length == 0)
8157 return 0;
8159 config_table = kmalloc(table_length, GFP_KERNEL);
8160 if (!config_table) {
8161 dev_err(&ctrl_info->pci_dev->dev,
8162 "failed to allocate memory for PQI configuration table\n");
8163 return -ENOMEM;
8164 }
8166 /*
8167 * Copy the config table contents from I/O memory space into the
8168 * driver's buffer.
8169 */
8170 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset;
8171 memcpy_fromio(config_table, table_iomem_addr, table_length);
8173 firmware_feature_section_present = false;
8174 section_info.ctrl_info = ctrl_info;
8175 section_offset = get_unaligned_le32(&config_table->first_section_offset);
8177 while (section_offset) {
8178 section = (void *)config_table + section_offset;
8180 section_info.section = section;
8181 section_info.section_offset = section_offset;
8182 section_info.section_iomem_addr = table_iomem_addr + section_offset;
8184 switch (get_unaligned_le16(&section->section_id)) {
8185 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
8186 firmware_feature_section_present = true;
8187 feature_section_info = section_info;
8188 break;
8189 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
8190 if (pqi_disable_heartbeat)
8191 dev_warn(&ctrl_info->pci_dev->dev,
8192 "heartbeat disabled by module parameter\n");
8193 else
8194 ctrl_info->heartbeat_counter =
8195 table_iomem_addr +
8196 section_offset +
8197 offsetof(struct pqi_config_table_heartbeat,
8198 heartbeat_counter);
8199 break;
8200 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
8201 ctrl_info->soft_reset_status =
8202 table_iomem_addr +
8203 section_offset +
8204 offsetof(struct pqi_config_table_soft_reset,
8205 soft_reset_status);
8206 break;
8207 }
8209 section_offset = get_unaligned_le16(&section->next_section_offset);
8210 }
8213 * We process the firmware feature section after all other sections
8214 * have been processed so that the feature bit callbacks can take
8215 * into account the settings configured by other sections.
8217 if (firmware_feature_section_present)
8218 pqi_process_firmware_features_section(&feature_section_info);
8220 kfree(config_table);
8222 return 0;
8223 }
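/*
 * Note that heartbeat_counter and soft_reset_status point into the
 * controller's BAR (table_iomem_addr), while the parsed table itself is a
 * kernel-memory copy that is freed once the walk completes.
 */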
8225 /* Switches the controller from PQI mode back into SIS mode. */
8227 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
8228 {
8229 int rc;
8231 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
8232 rc = pqi_reset(ctrl_info);
8233 if (rc)
8234 return rc;
8235 rc = sis_reenable_sis_mode(ctrl_info);
8236 if (rc) {
8237 dev_err(&ctrl_info->pci_dev->dev,
8238 "re-enabling SIS mode failed with error %d\n", rc);
8239 return rc;
8240 }
8241 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8243 return 0;
8244 }
8246 /*
8247 * If the controller isn't already in SIS mode, this function forces it into
8248 * SIS mode.
8249 */
8251 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
8252 {
8253 if (!sis_is_firmware_running(ctrl_info))
8254 return 0;
8256 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
8257 return 0;
8259 if (sis_is_kernel_up(ctrl_info)) {
8260 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
8261 return 0;
8262 }
8264 return pqi_revert_to_sis_mode(ctrl_info);
8265 }
8267 static void pqi_perform_lockup_action(void)
8268 {
8269 switch (pqi_lockup_action) {
8270 case PANIC:
8271 panic("FATAL: Smart Family Controller lockup detected");
8272 break;
8273 case REBOOT:
8274 emergency_restart();
8275 break;
8276 case NONE:
8277 default:
8278 break;
8279 }
8280 }
8282 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024)
8283 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS)
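/*
 * The controller-logging host buffer totals 4 MiB; the minimum size is the
 * total divided by the maximum number of SG descriptors, suggesting that
 * pqi_host_setup_buffer() may fall back to a smaller allocation when the
 * full size cannot be obtained (assumption based on these bounds).
 */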
8285 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
8286 {
8287 int rc;
8288 u32 product_id;
8290 if (reset_devices) {
8291 if (pqi_is_fw_triage_supported(ctrl_info)) {
8292 rc = sis_wait_for_fw_triage_completion(ctrl_info);
8293 if (rc)
8294 return rc;
8295 }
8296 if (sis_is_ctrl_logging_supported(ctrl_info)) {
8297 sis_notify_kdump(ctrl_info);
8298 rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
8299 if (rc)
8300 return rc;
8301 }
8302 sis_soft_reset(ctrl_info);
8303 ssleep(PQI_POST_RESET_DELAY_SECS);
8304 }
8305 rc = pqi_force_sis_mode(ctrl_info);
8306 if (rc)
8307 return rc;
8310 /*
8311 * Wait until the controller is ready to start accepting SIS
8312 * commands.
8313 */
8314 rc = sis_wait_for_ctrl_ready(ctrl_info);
8315 if (rc) {
8316 if (reset_devices) {
8317 dev_err(&ctrl_info->pci_dev->dev,
8318 "kdump init failed with error %d\n", rc);
8319 pqi_lockup_action = REBOOT;
8320 pqi_perform_lockup_action();
8321 }
8322 return rc;
8323 }
8325 /*
8326 * Get the controller properties. This allows us to determine
8327 * whether or not it supports PQI mode.
8328 */
8329 rc = sis_get_ctrl_properties(ctrl_info);
8330 if (rc) {
8331 dev_err(&ctrl_info->pci_dev->dev,
8332 "error obtaining controller properties\n");
8333 return rc;
8334 }
8336 rc = sis_get_pqi_capabilities(ctrl_info);
8337 if (rc) {
8338 dev_err(&ctrl_info->pci_dev->dev,
8339 "error obtaining controller capabilities\n");
8340 return rc;
8341 }
8343 product_id = sis_get_product_id(ctrl_info);
8344 ctrl_info->product_id = (u8)product_id;
8345 ctrl_info->product_revision = (u8)(product_id >> 8);
8347 if (reset_devices) {
8348 if (ctrl_info->max_outstanding_requests >
8349 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
8350 ctrl_info->max_outstanding_requests =
8351 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
8352 } else {
8353 if (ctrl_info->max_outstanding_requests >
8354 PQI_MAX_OUTSTANDING_REQUESTS)
8355 ctrl_info->max_outstanding_requests =
8356 PQI_MAX_OUTSTANDING_REQUESTS;
8357 }
8359 pqi_calculate_io_resources(ctrl_info);
8361 rc = pqi_alloc_error_buffer(ctrl_info);
8362 if (rc) {
8363 dev_err(&ctrl_info->pci_dev->dev,
8364 "failed to allocate PQI error buffer\n");
8365 return rc;
8366 }
8368 /*
8369 * If the function we are about to call succeeds, the
8370 * controller will transition from legacy SIS mode
8371 * into PQI mode.
8372 */
8373 rc = sis_init_base_struct_addr(ctrl_info);
8374 if (rc) {
8375 dev_err(&ctrl_info->pci_dev->dev,
8376 "error initializing PQI mode\n");
8377 return rc;
8378 }
8380 /* Wait for the controller to complete the SIS -> PQI transition. */
8381 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
8382 if (rc) {
8383 dev_err(&ctrl_info->pci_dev->dev,
8384 "transition to PQI mode failed\n");
8385 return rc;
8386 }
8388 /* From here on, we are running in PQI mode. */
8389 ctrl_info->pqi_mode_enabled = true;
8390 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
8392 rc = pqi_alloc_admin_queues(ctrl_info);
8393 if (rc) {
8394 dev_err(&ctrl_info->pci_dev->dev,
8395 "failed to allocate admin queues\n");
8396 return rc;
8397 }
8399 rc = pqi_create_admin_queues(ctrl_info);
8400 if (rc) {
8401 dev_err(&ctrl_info->pci_dev->dev,
8402 "error creating admin queues\n");
8403 return rc;
8404 }
8406 rc = pqi_report_device_capability(ctrl_info);
8407 if (rc) {
8408 dev_err(&ctrl_info->pci_dev->dev,
8409 "obtaining device capability failed\n");
8410 return rc;
8411 }
8413 rc = pqi_validate_device_capability(ctrl_info);
8414 if (rc)
8415 return rc;
8417 pqi_calculate_queue_resources(ctrl_info);
8419 rc = pqi_enable_msix_interrupts(ctrl_info);
8420 if (rc)
8421 return rc;
8423 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
8424 ctrl_info->max_msix_vectors =
8425 ctrl_info->num_msix_vectors_enabled;
8426 pqi_calculate_queue_resources(ctrl_info);
8429 rc = pqi_alloc_io_resources(ctrl_info);
8433 rc = pqi_alloc_operational_queues(ctrl_info);
8435 dev_err(&ctrl_info->pci_dev->dev,
8436 "failed to allocate operational queues\n");
8440 pqi_init_operational_queues(ctrl_info);
8442 rc = pqi_create_queues(ctrl_info);
8446 rc = pqi_request_irqs(ctrl_info);
8450 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
8452 ctrl_info->controller_online = true;
8454 rc = pqi_process_config_table(ctrl_info);
8458 pqi_start_heartbeat_timer(ctrl_info);
8460 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
8461 rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
8462 if (rc) { /* Supported features not returned correctly. */
8463 dev_err(&ctrl_info->pci_dev->dev,
8464 "error obtaining advanced RAID bypass configuration\n");
8467 ctrl_info->ciss_report_log_flags |=
8468 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
8471 rc = pqi_enable_events(ctrl_info);
8473 dev_err(&ctrl_info->pci_dev->dev,
8474 "error enabling events\n");
8478 /* Register with the SCSI subsystem. */
8479 rc = pqi_register_scsi(ctrl_info);
8483 if (ctrl_info->ctrl_logging_supported && !reset_devices) {
8484 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
8485 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
8488 rc = pqi_get_ctrl_product_details(ctrl_info);
8490 dev_err(&ctrl_info->pci_dev->dev,
8491 "error obtaining product details\n");
8495 rc = pqi_get_ctrl_serial_number(ctrl_info);
8497 dev_err(&ctrl_info->pci_dev->dev,
8498 "error obtaining ctrl serial number\n");
8502 rc = pqi_set_diag_rescan(ctrl_info);
8504 dev_err(&ctrl_info->pci_dev->dev,
8505 "error enabling multi-lun rescan\n");
8509 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
8511 dev_err(&ctrl_info->pci_dev->dev,
8512 "error updating host wellness\n");
8516 pqi_schedule_update_time_worker(ctrl_info);
8518 pqi_scan_scsi_devices(ctrl_info);
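/*
 * Reset the driver's cached queue index copies and zero the
 * hardware-visible index registers so that the existing queue memory
 * can be reused when the controller is reinitialized.
 */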
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues->iq_pi_copy = 0;
	admin_queues->oq_ci_copy = 0;
	writel(0, admin_queues->oq_pi);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
		ctrl_info->queue_groups[i].oq_ci_copy = 0;

		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
	}

	event_queue = &ctrl_info->event_queue;
	writel(0, event_queue->oq_pi);
	event_queue->oq_ci_copy = 0;
}
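/*
 * Lighter-weight counterpart of pqi_ctrl_init() used on resume and
 * after Online Firmware Activation: it reuses the previously allocated
 * queue memory and error buffers instead of reallocating them.
 */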
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	pqi_reinit_queues(ctrl_info);

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);

	pqi_ctrl_reset_config(ctrl_info);

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) {
		rc = pqi_get_advanced_raid_bypass_config(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error obtaining advanced RAID bypass configuration\n");
			return rc;
		}
		ctrl_info->ciss_report_log_flags |=
			CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX;
	}

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	if (pqi_ofa_in_progress(ctrl_info)) {
		pqi_ctrl_unblock_scan(ctrl_info);
		if (ctrl_info->ctrl_logging_supported) {
			if (!ctrl_info->ctrl_log_memory.host_memory)
				pqi_host_setup_buffer(ctrl_info,
					&ctrl_info->ctrl_log_memory,
					PQI_CTRL_LOG_TOTAL_SIZE,
					PQI_CTRL_LOG_MIN_SIZE);
			pqi_host_memory_update(ctrl_info,
				&ctrl_info->ctrl_log_memory,
				PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
		} else {
			if (ctrl_info->ctrl_log_memory.host_memory)
				pqi_host_free_buffer(ctrl_info,
					&ctrl_info->ctrl_log_memory);
		}
	}

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
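/*
 * Program the PCIe completion timeout range in the Device Control 2
 * register; pcibios_err_to_errno() converts the PCIBIOS_* result into
 * a standard errno value.
 */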
static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{
	int rc;

	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
}
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap(pci_resource_start(
		ctrl_info->pci_dev, 0),
		pci_resource_len(ctrl_info->pci_dev, 0));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS	0x6

	/* Increase the PCIe completion timeout. */
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to set PCIe completion timeout\n");
		goto release_regions;
	}

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
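/*
 * Allocate the per-controller context on the requested NUMA node and
 * initialize its locks, lists, timers, and deferred work items.
 */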
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker);
	INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
	ctrl_info->max_transfer_encrypted_sas_sata =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA;
	ctrl_info->max_transfer_encrypted_nvme =
		PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME;
	ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6;
	ctrl_info->max_write_raid_1_10_2drive = ~0;
	ctrl_info->max_write_raid_1_10_3drive = ~0;
	ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->controller_online = false;
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) {
		pqi_fail_all_outstanding_requests(ctrl_info);
		ctrl_info->pqi_mode_enabled = false;
	}
	pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
}
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);
}
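/*
 * Give the controller time to complete its post-OFA reset, then bring
 * it back up via the resume-style initialization path.
 */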
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{
	ssleep(delay_secs + PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}
static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor,
	u32 total_size, u32 chunk_size)
{
	int i;
	u32 sg_count;
	struct device *dev;
	struct pqi_host_memory *host_memory;
	struct pqi_sg_descriptor *mem_descriptor;
	dma_addr_t dma_handle;

	sg_count = DIV_ROUND_UP(total_size, chunk_size);
	if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS)
		goto out;

	host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL);
	if (!host_memory_descriptor->host_chunk_virt_address)
		goto out;

	dev = &ctrl_info->pci_dev->dev;
	host_memory = host_memory_descriptor->host_memory;

	for (i = 0; i < sg_count; i++) {
		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL);
		if (!host_memory_descriptor->host_chunk_virt_address[i])
			goto out_free_chunks;
		mem_descriptor = &host_memory->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors);
	put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &host_memory->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			host_memory_descriptor->host_chunk_virt_address[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(host_memory_descriptor->host_chunk_virt_address);
out:
	return -ENOMEM;
}
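/*
 * Allocate the largest buffer the firmware can use: starting from the
 * full requested size, repeatedly halve the chunk size, and then the
 * total size, until an allocation succeeds or the total drops below
 * the required minimum.
 */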
static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor,
	u32 total_required_size, u32 min_required_size)
{
	u32 chunk_size;
	u32 min_chunk_size;

	if (total_required_size == 0 || min_required_size == 0)
		return 0;

	total_required_size = PAGE_ALIGN(total_required_size);
	min_required_size = PAGE_ALIGN(min_required_size);
	min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS);
	min_chunk_size = PAGE_ALIGN(min_chunk_size);

	while (total_required_size >= min_required_size) {
		for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) {
			if (pqi_host_alloc_mem(ctrl_info,
				host_memory_descriptor, total_required_size,
				chunk_size) == 0)
				return 0;
			chunk_size /= 2;
			chunk_size = PAGE_ALIGN(chunk_size);
		}
		total_required_size /= 2;
		total_required_size = PAGE_ALIGN(total_required_size);
	}

	return -ENOMEM;
}
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor,
	u32 total_size, u32 min_size)
{
	struct device *dev;
	struct pqi_host_memory *host_memory;

	dev = &ctrl_info->pci_dev->dev;

	host_memory = dma_alloc_coherent(dev, sizeof(*host_memory),
		&host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL);
	if (!host_memory)
		return;

	host_memory_descriptor->host_memory = host_memory;

	if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor,
		total_size, min_size) < 0) {
		dev_err(dev, "failed to allocate firmware usable host buffer\n");
		dma_free_coherent(dev, sizeof(*host_memory), host_memory,
			host_memory_descriptor->host_memory_dma_handle);
		host_memory_descriptor->host_memory = NULL;
		return;
	}
}
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor)
{
	unsigned int i;
	struct device *dev;
	struct pqi_host_memory *host_memory;
	struct pqi_sg_descriptor *mem_descriptor;
	unsigned int num_memory_descriptors;

	host_memory = host_memory_descriptor->host_memory;
	if (!host_memory)
		return;

	dev = &ctrl_info->pci_dev->dev;

	if (get_unaligned_le32(&host_memory->bytes_allocated) == 0)
		goto out;

	mem_descriptor = host_memory->sg_descriptor;
	num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors);

	for (i = 0; i < num_memory_descriptors; i++) {
		dma_free_coherent(dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			host_memory_descriptor->host_chunk_virt_address[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(host_memory_descriptor->host_chunk_virt_address);

out:
	dma_free_coherent(dev, sizeof(*host_memory), host_memory,
		host_memory_descriptor->host_memory_dma_handle);
	host_memory_descriptor->host_memory = NULL;
}
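/*
 * Tell the controller firmware where the host buffer lives via a
 * vendor-general request. The signature and version written into the
 * buffer header identify it as either an OFA buffer or a controller
 * log buffer.
 */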
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor,
	u16 function_code)
{
	u32 buffer_length;
	struct pqi_vendor_general_request request;
	struct pqi_host_memory *host_memory;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le16(function_code, &request.function_code);

	host_memory = host_memory_descriptor->host_memory;

	if (host_memory) {
		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor);
		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address);
		put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length);

		if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) {
			put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version);
			memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature));
		} else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) {
			put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version);
			memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature));
		}
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);
}
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
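/*
 * Complete every outstanding request with an error: commands from the
 * SCSI midlayer get DID_NO_CONNECT, while driver-internal requests get
 * -ENXIO and the canned check-condition info above.
 */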
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			sdev = scmd->device;
			if (!sdev || !scsi_device_online(sdev)) {
				pqi_free_io_request(io_request);
				continue;
			}
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}
static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	char *string;

	switch (ctrl_shutdown_reason) {
	case PQI_IQ_NOT_DRAINED_TIMEOUT:
		string = "inbound queue not drained timeout";
		break;
	case PQI_LUN_RESET_TIMEOUT:
		string = "LUN reset timeout";
		break;
	case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT:
		string = "I/O pending timeout after LUN reset";
		break;
	case PQI_NO_HEARTBEAT:
		string = "no controller heartbeat detected";
		break;
	case PQI_FIRMWARE_KERNEL_NOT_UP:
		string = "firmware kernel not ready";
		break;
	case PQI_OFA_RESPONSE_TIMEOUT:
		string = "OFA response timeout";
		break;
	case PQI_INVALID_REQ_ID:
		string = "invalid request ID";
		break;
	case PQI_UNMATCHED_REQ_ID:
		string = "unmatched request ID";
		break;
	case PQI_IO_PI_OUT_OF_RANGE:
		string = "I/O queue producer index out of range";
		break;
	case PQI_EVENT_PI_OUT_OF_RANGE:
		string = "event queue producer index out of range";
		break;
	case PQI_UNEXPECTED_IU_TYPE:
		string = "unexpected IU type";
		break;
	default:
		string = "unknown reason";
		break;
	}

	return string;
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev,
		"controller offline: reason code 0x%x (%s)\n",
		ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason));
	schedule_work(&ctrl_info->ctrl_offline_work);
}
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microchip Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		node = cpu_to_node(0);
		if (node == NUMA_NO_NODE)
			node = 0;
		set_dev_node(&pci_dev->dev, node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}
	ctrl_info->numa_node = node;

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;
	u16 vendor_id;
	int rc;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
	if (vendor_id == 0xffff)
		ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
	else
		ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;

	if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
		rc = pqi_flush_cache(ctrl_info, RESTART);
		if (rc)
			dev_err(&pci_dev->dev,
				"unable to flush controller cache during remove\n");
	}

	pqi_remove_ctrl(ctrl_info);
}
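/*
 * Sanity check used on the shutdown and suspend paths: no I/O should
 * be outstanding by the time this runs, so a WARN_ON() fires for any
 * request slot that is still in use.
 */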
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (system_state == SYSTEM_RESTART)
		shutdown_event = RESTART;
	else
		shutdown_event = SHUTDOWN;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, shutdown_event);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache during shutdown\n");

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}
#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS	30
#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS	(30 * 60)
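/*
 * Clamp the ctrl_ready_timeout module parameter to the supported range
 * and propagate it to the SIS layer; a value of 0 keeps the default.
 */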
static void pqi_process_ctrl_ready_timeout_param(void)
{
	if (pqi_ctrl_ready_timeout_secs == 0)
		return;

	if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) {
		pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n",
			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS);
		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS;
	} else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) {
		pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n",
			DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS);
		pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS;
	}

	sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs;
}
static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
	pqi_process_ctrl_ready_timeout_param();
}
#if defined(CONFIG_PM)

static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
		return RESTART;

	return SUSPEND;
}
static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (suspend) {
		enum bmic_flush_cache_shutdown_event shutdown_event;

		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
		pqi_flush_cache(ctrl_info, shutdown_event);
	}

	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);
	pqi_free_irqs(ctrl_info);

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
static __maybe_unused int pqi_suspend(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, true);
}
static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}
static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}
static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}
static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}
static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

#endif /* CONFIG_PM */
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0659) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1304) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1400) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1402) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1410) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1411) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1412) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1420) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1430) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1440) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1441) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1450) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1452) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1462) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1463) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1470) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1471) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1472) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1473) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1474) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1475) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1480) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1490) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1491) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a5) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14a6) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14b1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c1) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c2) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c3) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14c4) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14d0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14e0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x14f0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1002) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0220) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0221) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0520) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0522) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0620) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0621) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0622) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_LENOVO, 0x0623) },
	PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
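
	/* PQI AIO path request IU */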
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
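
	/* PQI I/O response IU */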
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);
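
	/* PQI general management request IU (event configuration) */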
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
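
	/* PQI IU layer descriptor and device capability buffer */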
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
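
	/* PQI event descriptors, event configuration, and event IUs */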
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
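
	/* PQI task management request/response IUs */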
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
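
	/* BMIC identify controller/physical device buffers */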
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
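
	/* BMIC sense feature buffers */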
	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);
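
	/* admin/operational queue element counts and lengths must stay within PQI limits */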
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
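
	/* reserved I/O slots must leave room for regular requests */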
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);