2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/aer.h>
56 #include <linux/raid_class.h>
57 #include <asm/unaligned.h>
59 #include "mpt3sas_base.h"
61 #define RAID_CHANNEL 1
63 #define PCIE_CHANNEL 2
66 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67 struct _sas_node *sas_expander);
68 static void _firmware_event_work(struct work_struct *work);
70 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71 struct _sas_device *sas_device);
72 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73 u8 retry_count, u8 is_pd);
74 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76 struct _pcie_device *pcie_device);
78 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81 /* global parameters */
82 LIST_HEAD(mpt3sas_ioc_list);
83 /* global ioc lock for list operations */
84 DEFINE_SPINLOCK(gioc_lock);
86 MODULE_AUTHOR(MPT3SAS_AUTHOR);
87 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
88 MODULE_LICENSE("GPL");
89 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
90 MODULE_ALIAS("mpt2sas");
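/* Note (descriptive, not from the original header): the "mpt2sas" alias above
 * lets a "modprobe mpt2sas" or existing configurations that reference the old
 * mpt2sas module resolve to this merged driver, which also services the
 * SAS 2.0 generation HBAs (see the hbas_to_enumerate parameter below).
 */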
92 /* local parameters */
93 static u8 scsi_io_cb_idx = -1;
94 static u8 tm_cb_idx = -1;
95 static u8 ctl_cb_idx = -1;
96 static u8 base_cb_idx = -1;
97 static u8 port_enable_cb_idx = -1;
98 static u8 transport_cb_idx = -1;
99 static u8 scsih_cb_idx = -1;
100 static u8 config_cb_idx = -1;
104 static u8 tm_tr_cb_idx = -1 ;
105 static u8 tm_tr_volume_cb_idx = -1 ;
106 static u8 tm_sas_control_cb_idx = -1;
108 /* command line options */
109 static u32 logging_level;
110 MODULE_PARM_DESC(logging_level,
111 " bits for enabling additional logging info (default=0)");
114 static ushort max_sectors = 0xFFFF;
115 module_param(max_sectors, ushort, 0444);
116 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119 static int missing_delay[2] = {-1, -1};
120 module_param_array(missing_delay, int, NULL, 0444);
121 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
123 /* scsi mid-layer global parameter is max_report_luns, which is 511 */
124 #define MPT3SAS_MAX_LUN (16895)
125 static u64 max_lun = MPT3SAS_MAX_LUN;
126 module_param(max_lun, ullong, 0444);
127 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129 static ushort hbas_to_enumerate;
130 module_param(hbas_to_enumerate, ushort, 0444);
131 MODULE_PARM_DESC(hbas_to_enumerate,
132 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
133 1 - enumerates only SAS 2.0 generation HBAs\n \
134 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136 /* diag_buffer_enable is bitwise
137  * bit 0 set = TRACE
138  * bit 1 set = SNAPSHOT
139  * bit 2 set = EXTENDED
140  *
141  * Any combination of these bits can be set.
142  */
143 static int diag_buffer_enable = -1;
144 module_param(diag_buffer_enable, int, 0444);
145 MODULE_PARM_DESC(diag_buffer_enable,
146 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
147 static int disable_discovery = -1;
148 module_param(disable_discovery, int, 0444);
149 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
152 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
153 static int prot_mask = -1;
154 module_param(prot_mask, int, 0444);
155 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157 static bool enable_sdev_max_qd;
158 module_param(enable_sdev_max_qd, bool, 0444);
159 MODULE_PARM_DESC(enable_sdev_max_qd,
160 "Enable sdev max qd as can_queue, def=disabled(0)");
162 /* raid transport support */
163 static struct raid_template *mpt3sas_raid_template;
164 static struct raid_template *mpt2sas_raid_template;
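/* Two raid templates are kept because this driver registers both the mpt2sas
 * and mpt3sas personalities; the switch on ioc->hba_mpi_version_belonged in
 * the raid_set_*() callers further below picks the matching template.
 */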
168 * struct sense_info - common structure for obtaining sense keys
169 * @skey: sense key
170 * @asc: additional sense code
171 * @ascq: additional sense code qualifier
179 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
180 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
181 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
182 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
183 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
185 * struct fw_event_work - firmware event struct
186 * @list: link list framework
187 * @work: work object (ioc->fault_reset_work_q)
188 * @ioc: per adapter object
189 * @device_handle: device handle
190 * @VF_ID: virtual function id
191 * @VP_ID: virtual port id
192 * @ignore: flag meaning this event has been marked to ignore
193 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
194 * @refcount: kref for this event
195 * @event_data: reply event data payload follows
197 * This object is stored on ioc->fw_event_list.
199 struct fw_event_work {
200 struct list_head list;
201 struct work_struct work;
203 struct MPT3SAS_ADAPTER *ioc;
209 struct kref refcount;
210 char event_data[] __aligned(4);
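/* Lifetime of a fw_event_work object is managed through the kref helpers
 * below: fw_event_work_get() takes a reference, fw_event_work_put() drops it
 * and frees the object via fw_event_work_free() once the last user is gone.
 */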
213 static void fw_event_work_free(struct kref *r)
215 kfree(container_of(r, struct fw_event_work, refcount));
218 static void fw_event_work_get(struct fw_event_work *fw_work)
220 kref_get(&fw_work->refcount);
223 static void fw_event_work_put(struct fw_event_work *fw_work)
225 kref_put(&fw_work->refcount, fw_event_work_free);
228 static struct fw_event_work *alloc_fw_event_work(int len)
230 struct fw_event_work *fw_event;
232 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
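	/* One allocation covers the fixed header plus the variable-length
	 * event_data[] payload; GFP_ATOMIC is used presumably because events
	 * may be queued from non-sleeping (interrupt/completion) context.
	 */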
236 kref_init(&fw_event->refcount);
241 * struct _scsi_io_transfer - scsi io transfer
242 * @handle: sas device handle (assigned by firmware)
243 * @is_raid: flag set for hidden raid components
244 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
245 * @data_length: data transfer length
246 * @data_dma: dma pointer to data
249 * @cdb_length: cdb length
251 * @timeout: timeout for this command
252 * @VF_ID: virtual function id
253 * @VP_ID: virtual port id
254 * @valid_reply: flag set for reply message
255 * @sense_length: sense length
256 * @ioc_status: ioc status
257 * @scsi_state: scsi state
258 * @scsi_status: scsi status
259 * @log_info: log information
260 * @transfer_length: data length transfer when there is a reply message
262 * Used for sending internal scsi commands to devices within this module.
263 * Refer to _scsi_send_scsi_io().
265 struct _scsi_io_transfer {
268 enum dma_data_direction dir;
271 u8 sense[SCSI_SENSE_BUFFERSIZE];
279 /* the following bits are only valid when 'valid_reply = 1' */
289 * _scsih_set_debug_level - global setting of ioc->logging_level.
293 * Note: The logging levels are defined in mpt3sas_debug.h.
296 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
298 int ret = param_set_int(val, kp);
299 struct MPT3SAS_ADAPTER *ioc;
304 pr_info("setting logging_level(0x%08x)\n", logging_level);
305 spin_lock(&gioc_lock);
306 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
307 ioc->logging_level = logging_level;
308 spin_unlock(&gioc_lock);
311 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
312 &logging_level, 0644);
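/* With the 0644 permission above, logging_level is also writable at runtime;
 * on a typical build it is exposed as
 * /sys/module/mpt3sas/parameters/logging_level, e.g.
 *   echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
 * (the value here is only illustrative; the bit definitions live in
 * mpt3sas_debug.h).
 */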
315 * _scsih_srch_boot_sas_address - search based on sas_address
316 * @sas_address: sas address
317 * @boot_device: boot device object from bios page 2
319 * Return: 1 when there's a match, 0 means no match.
322 _scsih_srch_boot_sas_address(u64 sas_address,
323 Mpi2BootDeviceSasWwid_t *boot_device)
325 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
329 * _scsih_srch_boot_device_name - search based on device name
330 * @device_name: device name specified in the IDENTIFY frame
331 * @boot_device: boot device object from bios page 2
333 * Return: 1 when there's a match, 0 means no match.
336 _scsih_srch_boot_device_name(u64 device_name,
337 Mpi2BootDeviceDeviceName_t *boot_device)
339 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
343 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
344 * @enclosure_logical_id: enclosure logical id
345 * @slot_number: slot number
346 * @boot_device: boot device object from bios page 2
348 * Return: 1 when there's a match, 0 means no match.
351 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
352 Mpi2BootDeviceEnclosureSlot_t *boot_device)
354 return (enclosure_logical_id == le64_to_cpu(boot_device->
355 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
356 SlotNumber)) ? 1 : 0;
360 * _scsih_is_boot_device - search for matching boot device.
361 * @sas_address: sas address
362 * @device_name: device name specified in the IDENTIFY frame
363 * @enclosure_logical_id: enclosure logical id
365 * @form: specifies boot device form
366 * @boot_device: boot device object from bios page 2
368 * Return: 1 when there's a match, 0 means no match.
371 _scsih_is_boot_device(u64 sas_address, u64 device_name,
372 u64 enclosure_logical_id, u16 slot, u8 form,
373 Mpi2BiosPage2BootDevice_t *boot_device)
378 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
381 rc = _scsih_srch_boot_sas_address(
382 sas_address, &boot_device->SasWwid);
384 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
385 if (!enclosure_logical_id)
387 rc = _scsih_srch_boot_encl_slot(
388 enclosure_logical_id,
389 slot, &boot_device->EnclosureSlot);
391 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
394 rc = _scsih_srch_boot_device_name(
395 device_name, &boot_device->DeviceName);
397 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
405 * _scsih_get_sas_address - set the sas_address for given device handle
407 * @handle: device handle
408 * @sas_address: sas address
410 * Return: 0 success, non-zero when failure
413 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
416 Mpi2SasDevicePage0_t sas_device_pg0;
417 Mpi2ConfigReply_t mpi_reply;
422 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
423 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
424 ioc_err(ioc, "failure at %s:%d/%s()!\n",
425 __FILE__, __LINE__, __func__);
429 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
430 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
431 /* For HBA, vSES doesn't return HBA SAS address. Instead return
432 * vSES's sas address.
434 if ((handle <= ioc->sas_hba.num_phys) &&
435 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
436 MPI2_SAS_DEVICE_INFO_SEP)))
437 *sas_address = ioc->sas_hba.sas_address;
439 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
443 /* we hit this because the given parent handle doesn't exist */
444 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
447 /* else error case */
448 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
449 handle, ioc_status, __FILE__, __LINE__, __func__);
454 * _scsih_determine_boot_device - determine boot device.
455 * @ioc: per adapter object
456 * @device: sas_device or pcie_device object
457 * @channel: SAS or PCIe channel
459 * Determines whether this device should be the first device reported to
460 * scsi-ml or the sas transport; the purpose is persistent boot device support.
461 * There are primary, alternate, and current entries in bios page 2. The order
462 * of priority is primary, alternate, then current. This routine saves
463 * the corresponding device object.
464 * The saved data is used later in _scsih_probe_boot_devices().
467 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
470 struct _sas_device *sas_device;
471 struct _pcie_device *pcie_device;
472 struct _raid_device *raid_device;
475 u64 enclosure_logical_id;
478 /* only process this function when driver loads */
479 if (!ioc->is_driver_loading)
482 /* no Bios, return immediately */
483 if (!ioc->bios_pg3.BiosVersion)
486 if (channel == RAID_CHANNEL) {
487 raid_device = device;
488 sas_address = raid_device->wwid;
490 enclosure_logical_id = 0;
492 } else if (channel == PCIE_CHANNEL) {
493 pcie_device = device;
494 sas_address = pcie_device->wwid;
496 enclosure_logical_id = 0;
500 sas_address = sas_device->sas_address;
501 device_name = sas_device->device_name;
502 enclosure_logical_id = sas_device->enclosure_logical_id;
503 slot = sas_device->slot;
506 if (!ioc->req_boot_device.device) {
507 if (_scsih_is_boot_device(sas_address, device_name,
508 enclosure_logical_id, slot,
509 (ioc->bios_pg2.ReqBootDeviceForm &
510 MPI2_BIOSPAGE2_FORM_MASK),
511 &ioc->bios_pg2.RequestedBootDevice)) {
513 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
514 __func__, (u64)sas_address));
515 ioc->req_boot_device.device = device;
516 ioc->req_boot_device.channel = channel;
520 if (!ioc->req_alt_boot_device.device) {
521 if (_scsih_is_boot_device(sas_address, device_name,
522 enclosure_logical_id, slot,
523 (ioc->bios_pg2.ReqAltBootDeviceForm &
524 MPI2_BIOSPAGE2_FORM_MASK),
525 &ioc->bios_pg2.RequestedAltBootDevice)) {
527 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
528 __func__, (u64)sas_address));
529 ioc->req_alt_boot_device.device = device;
530 ioc->req_alt_boot_device.channel = channel;
534 if (!ioc->current_boot_device.device) {
535 if (_scsih_is_boot_device(sas_address, device_name,
536 enclosure_logical_id, slot,
537 (ioc->bios_pg2.CurrentBootDeviceForm &
538 MPI2_BIOSPAGE2_FORM_MASK),
539 &ioc->bios_pg2.CurrentBootDevice)) {
541 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
542 __func__, (u64)sas_address));
543 ioc->current_boot_device.device = device;
544 ioc->current_boot_device.channel = channel;
549 static struct _sas_device *
550 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
551 struct MPT3SAS_TARGET *tgt_priv)
553 struct _sas_device *ret;
555 assert_spin_locked(&ioc->sas_device_lock);
557 ret = tgt_priv->sas_dev;
564 static struct _sas_device *
565 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
566 struct MPT3SAS_TARGET *tgt_priv)
568 struct _sas_device *ret;
571 spin_lock_irqsave(&ioc->sas_device_lock, flags);
572 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
573 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
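/* The lookup helpers in this family return the device object with its
 * reference count elevated (taken via sas_device_get()/pcie_device_get()
 * under the corresponding list lock), so callers are expected to drop the
 * reference with the matching sas_device_put()/pcie_device_put() when done.
 */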
578 static struct _pcie_device *
579 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
580 struct MPT3SAS_TARGET *tgt_priv)
582 struct _pcie_device *ret;
584 assert_spin_locked(&ioc->pcie_device_lock);
586 ret = tgt_priv->pcie_dev;
588 pcie_device_get(ret);
594 * mpt3sas_get_pdev_from_target - pcie device search
595 * @ioc: per adapter object
596 * @tgt_priv: starget private object
598 * Context: This function will acquire ioc->pcie_device_lock and will release
599 * it before returning the pcie_device object.
601 * This searches for the pcie_device from the target, then returns the pcie_device object.
603 static struct _pcie_device *
604 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
605 struct MPT3SAS_TARGET *tgt_priv)
607 struct _pcie_device *ret;
610 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
611 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
612 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
618 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
621 struct _sas_device *sas_device;
623 assert_spin_locked(&ioc->sas_device_lock);
625 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
626 if (sas_device->sas_address == sas_address)
629 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
630 if (sas_device->sas_address == sas_address)
636 sas_device_get(sas_device);
641 * mpt3sas_get_sdev_by_addr - sas device search
642 * @ioc: per adapter object
643 * @sas_address: sas address
644 * Context: Calling function should acquire ioc->sas_device_lock
646 * This searches for sas_device based on sas_address, then returns the sas_device object.
650 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
653 struct _sas_device *sas_device;
656 spin_lock_irqsave(&ioc->sas_device_lock, flags);
657 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
659 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
664 static struct _sas_device *
665 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
667 struct _sas_device *sas_device;
669 assert_spin_locked(&ioc->sas_device_lock);
671 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
672 if (sas_device->handle == handle)
675 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
676 if (sas_device->handle == handle)
682 sas_device_get(sas_device);
687 * mpt3sas_get_sdev_by_handle - sas device search
688 * @ioc: per adapter object
689 * @handle: sas device handle (assigned by firmware)
690 * Context: Calling function should acquire ioc->sas_device_lock
692 * This searches for sas_device based on handle, then returns the sas_device object.
696 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
698 struct _sas_device *sas_device;
701 spin_lock_irqsave(&ioc->sas_device_lock, flags);
702 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
703 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
709 * _scsih_display_enclosure_chassis_info - display device location info
710 * @ioc: per adapter object
711 * @sas_device: per sas device object
712 * @sdev: scsi device struct
713 * @starget: scsi target struct
716 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
717 struct _sas_device *sas_device, struct scsi_device *sdev,
718 struct scsi_target *starget)
721 if (sas_device->enclosure_handle != 0)
722 sdev_printk(KERN_INFO, sdev,
723 "enclosure logical id (0x%016llx), slot(%d) \n",
725 sas_device->enclosure_logical_id,
727 if (sas_device->connector_name[0] != '\0')
728 sdev_printk(KERN_INFO, sdev,
729 "enclosure level(0x%04x), connector name( %s)\n",
730 sas_device->enclosure_level,
731 sas_device->connector_name);
732 if (sas_device->is_chassis_slot_valid)
733 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
734 sas_device->chassis_slot);
735 } else if (starget) {
736 if (sas_device->enclosure_handle != 0)
737 starget_printk(KERN_INFO, starget,
738 "enclosure logical id(0x%016llx), slot(%d) \n",
740 sas_device->enclosure_logical_id,
742 if (sas_device->connector_name[0] != '\0')
743 starget_printk(KERN_INFO, starget,
744 "enclosure level(0x%04x), connector name( %s)\n",
745 sas_device->enclosure_level,
746 sas_device->connector_name);
747 if (sas_device->is_chassis_slot_valid)
748 starget_printk(KERN_INFO, starget,
749 "chassis slot(0x%04x)\n",
750 sas_device->chassis_slot);
752 if (sas_device->enclosure_handle != 0)
753 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
754 (u64)sas_device->enclosure_logical_id,
756 if (sas_device->connector_name[0] != '\0')
757 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
758 sas_device->enclosure_level,
759 sas_device->connector_name);
760 if (sas_device->is_chassis_slot_valid)
761 ioc_info(ioc, "chassis slot(0x%04x)\n",
762 sas_device->chassis_slot);
767 * _scsih_sas_device_remove - remove sas_device from list.
768 * @ioc: per adapter object
769 * @sas_device: the sas_device object
770 * Context: This function will acquire ioc->sas_device_lock.
772 * If sas_device is on the list, remove it and decrement its reference count.
775 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
776 struct _sas_device *sas_device)
782 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
783 sas_device->handle, (u64)sas_device->sas_address);
785 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
788 * The lock serializes access to the list, but we still need to verify
789 * that nobody removed the entry while we were waiting on the lock.
791 spin_lock_irqsave(&ioc->sas_device_lock, flags);
792 if (!list_empty(&sas_device->list)) {
793 list_del_init(&sas_device->list);
794 sas_device_put(sas_device);
796 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
800 * _scsih_device_remove_by_handle - removing device object by handle
801 * @ioc: per adapter object
802 * @handle: device handle
805 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
807 struct _sas_device *sas_device;
810 if (ioc->shost_recovery)
813 spin_lock_irqsave(&ioc->sas_device_lock, flags);
814 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
816 list_del_init(&sas_device->list);
817 sas_device_put(sas_device);
819 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
821 _scsih_remove_device(ioc, sas_device);
822 sas_device_put(sas_device);
827 * mpt3sas_device_remove_by_sas_address - removing device object by sas address
828 * @ioc: per adapter object
829 * @sas_address: device sas_address
832 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
835 struct _sas_device *sas_device;
838 if (ioc->shost_recovery)
841 spin_lock_irqsave(&ioc->sas_device_lock, flags);
842 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
844 list_del_init(&sas_device->list);
845 sas_device_put(sas_device);
847 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
849 _scsih_remove_device(ioc, sas_device);
850 sas_device_put(sas_device);
855 * _scsih_sas_device_add - insert sas_device to the list.
856 * @ioc: per adapter object
857 * @sas_device: the sas_device object
858 * Context: This function will acquire ioc->sas_device_lock.
860 * Adding new object to the ioc->sas_device_list.
863 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
864 struct _sas_device *sas_device)
869 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
870 __func__, sas_device->handle,
871 (u64)sas_device->sas_address));
873 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
876 spin_lock_irqsave(&ioc->sas_device_lock, flags);
877 sas_device_get(sas_device);
878 list_add_tail(&sas_device->list, &ioc->sas_device_list);
879 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
881 if (ioc->hide_drives) {
882 clear_bit(sas_device->handle, ioc->pend_os_device_add);
886 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
887 sas_device->sas_address_parent)) {
888 _scsih_sas_device_remove(ioc, sas_device);
889 } else if (!sas_device->starget) {
891 * When async scanning is enabled, it's not possible to remove
892 * devices while scanning is turned on due to an oops in
893 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
895 if (!ioc->is_driver_loading) {
896 mpt3sas_transport_port_remove(ioc,
897 sas_device->sas_address,
898 sas_device->sas_address_parent);
899 _scsih_sas_device_remove(ioc, sas_device);
902 clear_bit(sas_device->handle, ioc->pend_os_device_add);
906 * _scsih_sas_device_init_add - insert sas_device to the list.
907 * @ioc: per adapter object
908 * @sas_device: the sas_device object
909 * Context: This function will acquire ioc->sas_device_lock.
911 * Adding new object at driver load time to the ioc->sas_device_init_list.
914 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
915 struct _sas_device *sas_device)
920 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
921 __func__, sas_device->handle,
922 (u64)sas_device->sas_address));
924 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
927 spin_lock_irqsave(&ioc->sas_device_lock, flags);
928 sas_device_get(sas_device);
929 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
930 _scsih_determine_boot_device(ioc, sas_device, 0);
931 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
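/* Devices added here during driver load are parked on sas_device_init_list;
 * they are reported to the SCSI midlayer later, once discovery completes (see
 * _scsih_probe_boot_devices() referenced above), which is also when the boot
 * device candidates recorded by _scsih_determine_boot_device() are honored.
 */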
935 static struct _pcie_device *
936 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
938 struct _pcie_device *pcie_device;
940 assert_spin_locked(&ioc->pcie_device_lock);
942 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
943 if (pcie_device->wwid == wwid)
946 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
947 if (pcie_device->wwid == wwid)
953 pcie_device_get(pcie_device);
959 * mpt3sas_get_pdev_by_wwid - pcie device search
960 * @ioc: per adapter object
963 * Context: This function will acquire ioc->pcie_device_lock and will release
964 * it before returning the pcie_device object.
966 * This searches for pcie_device based on wwid, then returns the pcie_device object.
968 static struct _pcie_device *
969 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
971 struct _pcie_device *pcie_device;
974 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
975 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
976 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
982 static struct _pcie_device *
983 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
986 struct _pcie_device *pcie_device;
988 assert_spin_locked(&ioc->pcie_device_lock);
990 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
991 if (pcie_device->id == id && pcie_device->channel == channel)
994 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
995 if (pcie_device->id == id && pcie_device->channel == channel)
1001 pcie_device_get(pcie_device);
1005 static struct _pcie_device *
1006 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1008 struct _pcie_device *pcie_device;
1010 assert_spin_locked(&ioc->pcie_device_lock);
1012 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1013 if (pcie_device->handle == handle)
1016 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1017 if (pcie_device->handle == handle)
1023 pcie_device_get(pcie_device);
1029 * mpt3sas_get_pdev_by_handle - pcie device search
1030 * @ioc: per adapter object
1031 * @handle: Firmware device handle
1033 * Context: This function will acquire ioc->pcie_device_lock and will release
1034 * it before returning the pcie_device object.
1036 * This searches for pcie_device based on handle, then returns the pcie_device object.
1039 struct _pcie_device *
1040 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1042 struct _pcie_device *pcie_device;
1043 unsigned long flags;
1045 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1046 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1047 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1053 * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1054 * @ioc: per adapter object
1055 * Context: This function will acquire ioc->pcie_device_lock
1057 * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
1058 * reported among all available NVMe drives.
1059 * The minimum max_shutdown_latency is six seconds.
1062 _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1064 struct _pcie_device *pcie_device;
1065 unsigned long flags;
1066 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1068 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1069 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1070 if (pcie_device->shutdown_latency) {
1071 if (shutdown_latency < pcie_device->shutdown_latency)
1073 pcie_device->shutdown_latency;
1076 ioc->max_shutdown_latency = shutdown_latency;
1077 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
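/* Worked example (assuming the values shown): with NVMe drives reporting RTD3
 * entry latencies of 4 and 10 seconds, ioc->max_shutdown_latency ends up as
 * 10; if no drive reports more than the IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT
 * default (six seconds, per the comment above), the default is kept.
 */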
1081 * _scsih_pcie_device_remove - remove pcie_device from list.
1082 * @ioc: per adapter object
1083 * @pcie_device: the pcie_device object
1084 * Context: This function will acquire ioc->pcie_device_lock.
1086 * If pcie_device is on the list, remove it and decrement its reference count.
1089 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1090 struct _pcie_device *pcie_device)
1092 unsigned long flags;
1093 int was_on_pcie_device_list = 0;
1094 u8 update_latency = 0;
1098 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1099 pcie_device->handle, (u64)pcie_device->wwid);
1100 if (pcie_device->enclosure_handle != 0)
1101 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1102 (u64)pcie_device->enclosure_logical_id,
1104 if (pcie_device->connector_name[0] != '\0')
1105 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1106 pcie_device->enclosure_level,
1107 pcie_device->connector_name);
1109 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1110 if (!list_empty(&pcie_device->list)) {
1111 list_del_init(&pcie_device->list);
1112 was_on_pcie_device_list = 1;
1114 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1116 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1117 if (was_on_pcie_device_list) {
1118 kfree(pcie_device->serial_number);
1119 pcie_device_put(pcie_device);
1123 * This device's RTD3 Entry Latency matches IOC's
1124 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1125 * from the remaining drives, as the current drive is being removed.
1128 _scsih_set_nvme_max_shutdown_latency(ioc);
1133 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1134 * @ioc: per adapter object
1135 * @handle: device handle
1138 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1140 struct _pcie_device *pcie_device;
1141 unsigned long flags;
1142 int was_on_pcie_device_list = 0;
1143 u8 update_latency = 0;
1145 if (ioc->shost_recovery)
1148 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1149 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1151 if (!list_empty(&pcie_device->list)) {
1152 list_del_init(&pcie_device->list);
1153 was_on_pcie_device_list = 1;
1154 pcie_device_put(pcie_device);
1156 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1159 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1160 if (was_on_pcie_device_list) {
1161 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1162 pcie_device_put(pcie_device);
1166 * This device's RTD3 Entry Latency matches IOC's
1167 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
1168 * from the remaining drives, as the current drive is being removed.
1171 _scsih_set_nvme_max_shutdown_latency(ioc);
1175 * _scsih_pcie_device_add - add pcie_device object
1176 * @ioc: per adapter object
1177 * @pcie_device: pcie_device object
1179 * This is added to the pcie_device_list linked list.
1182 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1183 struct _pcie_device *pcie_device)
1185 unsigned long flags;
1188 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1190 pcie_device->handle, (u64)pcie_device->wwid));
1191 if (pcie_device->enclosure_handle != 0)
1193 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1195 (u64)pcie_device->enclosure_logical_id,
1196 pcie_device->slot));
1197 if (pcie_device->connector_name[0] != '\0')
1199 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1200 __func__, pcie_device->enclosure_level,
1201 pcie_device->connector_name));
1203 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1204 pcie_device_get(pcie_device);
1205 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1206 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1208 if (pcie_device->access_status ==
1209 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1210 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1213 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1214 _scsih_pcie_device_remove(ioc, pcie_device);
1215 } else if (!pcie_device->starget) {
1216 if (!ioc->is_driver_loading) {
1217 /*TODO-- Need to find out whether this condition will occur or not*/
1218 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1221 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
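/* Unlike SAS end devices, which are exposed through
 * mpt3sas_transport_port_add(), NVMe and PCIe SCSI devices are registered
 * with the midlayer directly via scsi_add_device() on the dedicated
 * PCIE_CHANNEL.
 */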
1225 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1226 * @ioc: per adapter object
1227 * @pcie_device: the pcie_device object
1228 * Context: This function will acquire ioc->pcie_device_lock.
1230 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1233 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1234 struct _pcie_device *pcie_device)
1236 unsigned long flags;
1239 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1241 pcie_device->handle, (u64)pcie_device->wwid));
1242 if (pcie_device->enclosure_handle != 0)
1244 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1246 (u64)pcie_device->enclosure_logical_id,
1247 pcie_device->slot));
1248 if (pcie_device->connector_name[0] != '\0')
1250 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1251 __func__, pcie_device->enclosure_level,
1252 pcie_device->connector_name));
1254 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1255 pcie_device_get(pcie_device);
1256 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1257 if (pcie_device->access_status !=
1258 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1259 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1260 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1263 * _scsih_raid_device_find_by_id - raid device search
1264 * @ioc: per adapter object
1265 * @id: sas device target id
1266 * @channel: sas device channel
1267 * Context: Calling function should acquire ioc->raid_device_lock
1269 * This searches for raid_device based on target id, then returns the raid_device object.
1272 static struct _raid_device *
1273 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1275 struct _raid_device *raid_device, *r;
1278 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1279 if (raid_device->id == id && raid_device->channel == channel) {
1290 * mpt3sas_raid_device_find_by_handle - raid device search
1291 * @ioc: per adapter object
1292 * @handle: sas device handle (assigned by firmware)
1293 * Context: Calling function should acquire ioc->raid_device_lock
1295 * This searches for raid_device based on handle, then returns the raid_device object.
1298 struct _raid_device *
1299 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1301 struct _raid_device *raid_device, *r;
1304 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1305 if (raid_device->handle != handle)
1316 * _scsih_raid_device_find_by_wwid - raid device search
1317 * @ioc: per adapter object
1319 * Context: Calling function should acquire ioc->raid_device_lock
1321 * This searches for raid_device based on wwid, then returns the raid_device object.
1324 static struct _raid_device *
1325 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1327 struct _raid_device *raid_device, *r;
1330 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1331 if (raid_device->wwid != wwid)
1342 * _scsih_raid_device_add - add raid_device object
1343 * @ioc: per adapter object
1344 * @raid_device: raid_device object
1346 * This is added to the raid_device_list linked list.
1349 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1350 struct _raid_device *raid_device)
1352 unsigned long flags;
1355 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1357 raid_device->handle, (u64)raid_device->wwid));
1359 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1360 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1361 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1365 * _scsih_raid_device_remove - delete raid_device object
1366 * @ioc: per adapter object
1367 * @raid_device: raid_device object
1371 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1372 struct _raid_device *raid_device)
1374 unsigned long flags;
1376 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1377 list_del(&raid_device->list);
1379 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1383 * mpt3sas_scsih_expander_find_by_handle - expander device search
1384 * @ioc: per adapter object
1385 * @handle: expander handle (assigned by firmware)
1386 * Context: Calling function should acquire ioc->sas_node_lock
1388 * This searches for expander device based on handle, then returns the
1392 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1394 struct _sas_node *sas_expander, *r;
1397 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1398 if (sas_expander->handle != handle)
1408 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1409 * @ioc: per adapter object
1410 * @handle: enclosure handle (assigned by firmware)
1411 * Context: Calling function should acquire ioc->sas_device_lock
1413 * This searches for enclosure device based on handle, then returns the
1416 static struct _enclosure_node *
1417 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1419 struct _enclosure_node *enclosure_dev, *r;
1422 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1423 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1432 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1433 * @ioc: per adapter object
1434 * @sas_address: sas address
1435 * Context: Calling function should acquire ioc->sas_node_lock.
1437 * This searches for expander device based on sas_address, then returns the
1441 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1444 struct _sas_node *sas_expander, *r;
1447 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1448 if (sas_expander->sas_address != sas_address)
1458 * _scsih_expander_node_add - insert expander device to the list.
1459 * @ioc: per adapter object
1460 * @sas_expander: the sas_device object
1461 * Context: This function will acquire ioc->sas_node_lock.
1463 * Adding new object to the ioc->sas_expander_list.
1466 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1467 struct _sas_node *sas_expander)
1469 unsigned long flags;
1471 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1472 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1473 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1477 * _scsih_is_end_device - determines if device is an end device
1478 * @device_info: bitfield providing information about the device.
1481 * Return: 1 if end device.
1484 _scsih_is_end_device(u32 device_info)
1486 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1487 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1488 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1489 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1496 * _scsih_is_nvme_pciescsi_device - determines if
1497 * device is a pcie nvme/scsi device
1498 * @device_info: bitfield providing information about the device.
1501 * Return: 1 if the device is a pcie device of type nvme or scsi.
1504 _scsih_is_nvme_pciescsi_device(u32 device_info)
1506 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1507 == MPI26_PCIE_DEVINFO_NVME) ||
1508 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1509 == MPI26_PCIE_DEVINFO_SCSI))
1516 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1517 * @ioc: per adapter object
1520 * Context: This function will acquire ioc->scsi_lookup_lock.
1522 * This will search for a matching channel:id in the scsi_lookup array,
1523 * returning 1 if found.
1526 _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1530 struct scsi_cmnd *scmd;
1533 smid <= ioc->shost->can_queue; smid++) {
1534 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1537 if (scmd->device->id == id &&
1538 scmd->device->channel == channel)
1545 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1546 * @ioc: per adapter object
1550 * Context: This function will acquire ioc->scsi_lookup_lock.
1552 * This will search for a matching channel:id:lun in the scsi_lookup array,
1553 * returning 1 if found.
1556 _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1557 unsigned int lun, int channel)
1560 struct scsi_cmnd *scmd;
1562 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1564 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1567 if (scmd->device->id == id &&
1568 scmd->device->channel == channel &&
1569 scmd->device->lun == lun)
1576 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1577 * @ioc: per adapter object
1578 * @smid: system request message index
1580 * Return: the scmd pointer stored for this smid, after validating that the
1581 * corresponding request is still outstanding; otherwise NULL.
1584 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1586 struct scsi_cmnd *scmd = NULL;
1587 struct scsiio_tracker *st;
1588 Mpi25SCSIIORequest_t *mpi_request;
1591 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1592 u32 unique_tag = smid - 1;
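		/* smid values are 1-based while block layer tags are 0-based,
		 * hence the "- 1" when mapping back to a tag for
		 * scsi_host_find_tag().
		 */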
1594 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1597 * If SCSI IO request is outstanding at driver level then
1598 * DevHandle field must be non-zero. If DevHandle is zero
1599 * then it means that this smid is free at driver level,
1602 if (!mpi_request->DevHandle)
1605 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1607 st = scsi_cmd_priv(scmd);
1608 if (st->cb_idx == 0xFF || st->smid == 0)
1616 * scsih_change_queue_depth - setting device queue depth
1617 * @sdev: scsi device struct
1618 * @qdepth: requested queue depth
1620 * Return: queue depth.
1623 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1625 struct Scsi_Host *shost = sdev->host;
1627 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1628 struct MPT3SAS_DEVICE *sas_device_priv_data;
1629 struct MPT3SAS_TARGET *sas_target_priv_data;
1630 struct _sas_device *sas_device;
1631 unsigned long flags;
1633 max_depth = shost->can_queue;
1636 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1637 * is disabled.
1639 if (ioc->enable_sdev_max_qd)
1642 sas_device_priv_data = sdev->hostdata;
1643 if (!sas_device_priv_data)
1645 sas_target_priv_data = sas_device_priv_data->sas_target;
1646 if (!sas_target_priv_data)
1648 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1651 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1652 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1654 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1655 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1657 sas_device_put(sas_device);
1659 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1663 if (!sdev->tagged_supported)
1665 if (qdepth > max_depth)
1667 scsi_change_queue_depth(sdev, qdepth);
1668 sdev_printk(KERN_INFO, sdev,
1669 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1670 sdev->queue_depth, sdev->tagged_supported,
1671 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1672 return sdev->queue_depth;
1676 * mpt3sas_scsih_change_queue_depth - setting device queue depth
1677 * @sdev: scsi device struct
1678 * @qdepth: requested queue depth
1683 mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1685 struct Scsi_Host *shost = sdev->host;
1686 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1688 if (ioc->enable_sdev_max_qd)
1689 qdepth = shost->can_queue;
1691 scsih_change_queue_depth(sdev, qdepth);
1695 * scsih_target_alloc - target add routine
1696 * @starget: scsi target struct
1698 * Return: 0 if ok. Any other return is assumed to be an error and
1699 * the device is ignored.
1702 scsih_target_alloc(struct scsi_target *starget)
1704 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1705 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1706 struct MPT3SAS_TARGET *sas_target_priv_data;
1707 struct _sas_device *sas_device;
1708 struct _raid_device *raid_device;
1709 struct _pcie_device *pcie_device;
1710 unsigned long flags;
1711 struct sas_rphy *rphy;
1713 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1715 if (!sas_target_priv_data)
1718 starget->hostdata = sas_target_priv_data;
1719 sas_target_priv_data->starget = starget;
1720 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1723 if (starget->channel == RAID_CHANNEL) {
1724 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1725 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1728 sas_target_priv_data->handle = raid_device->handle;
1729 sas_target_priv_data->sas_address = raid_device->wwid;
1730 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1731 if (ioc->is_warpdrive)
1732 sas_target_priv_data->raid_device = raid_device;
1733 raid_device->starget = starget;
1735 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1740 if (starget->channel == PCIE_CHANNEL) {
1741 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1742 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1745 sas_target_priv_data->handle = pcie_device->handle;
1746 sas_target_priv_data->sas_address = pcie_device->wwid;
1747 sas_target_priv_data->pcie_dev = pcie_device;
1748 pcie_device->starget = starget;
1749 pcie_device->id = starget->id;
1750 pcie_device->channel = starget->channel;
1751 sas_target_priv_data->flags |=
1752 MPT_TARGET_FLAGS_PCIE_DEVICE;
1753 if (pcie_device->fast_path)
1754 sas_target_priv_data->flags |=
1755 MPT_TARGET_FASTPATH_IO;
1757 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1761 /* sas/sata devices */
1762 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1763 rphy = dev_to_rphy(starget->dev.parent);
1764 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1765 rphy->identify.sas_address);
1768 sas_target_priv_data->handle = sas_device->handle;
1769 sas_target_priv_data->sas_address = sas_device->sas_address;
1770 sas_target_priv_data->sas_dev = sas_device;
1771 sas_device->starget = starget;
1772 sas_device->id = starget->id;
1773 sas_device->channel = starget->channel;
1774 if (test_bit(sas_device->handle, ioc->pd_handles))
1775 sas_target_priv_data->flags |=
1776 MPT_TARGET_FLAGS_RAID_COMPONENT;
1777 if (sas_device->fast_path)
1778 sas_target_priv_data->flags |=
1779 MPT_TARGET_FASTPATH_IO;
1781 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1787 * scsih_target_destroy - target destroy routine
1788 * @starget: scsi target struct
1791 scsih_target_destroy(struct scsi_target *starget)
1793 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1794 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 struct MPT3SAS_TARGET *sas_target_priv_data;
1796 struct _sas_device *sas_device;
1797 struct _raid_device *raid_device;
1798 struct _pcie_device *pcie_device;
1799 unsigned long flags;
1801 sas_target_priv_data = starget->hostdata;
1802 if (!sas_target_priv_data)
1805 if (starget->channel == RAID_CHANNEL) {
1806 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1807 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1810 raid_device->starget = NULL;
1811 raid_device->sdev = NULL;
1813 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1817 if (starget->channel == PCIE_CHANNEL) {
1818 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1819 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1820 sas_target_priv_data);
1821 if (pcie_device && (pcie_device->starget == starget) &&
1822 (pcie_device->id == starget->id) &&
1823 (pcie_device->channel == starget->channel))
1824 pcie_device->starget = NULL;
1828 * Corresponding get() is in _scsih_target_alloc()
1830 sas_target_priv_data->pcie_dev = NULL;
1831 pcie_device_put(pcie_device);
1832 pcie_device_put(pcie_device);
1834 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1838 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1839 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1840 if (sas_device && (sas_device->starget == starget) &&
1841 (sas_device->id == starget->id) &&
1842 (sas_device->channel == starget->channel))
1843 sas_device->starget = NULL;
1847 * Corresponding get() is in _scsih_target_alloc()
1849 sas_target_priv_data->sas_dev = NULL;
1850 sas_device_put(sas_device);
1852 sas_device_put(sas_device);
1854 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1857 kfree(sas_target_priv_data);
1858 starget->hostdata = NULL;
1862 * scsih_slave_alloc - device add routine
1863 * @sdev: scsi device struct
1865 * Return: 0 if ok. Any other return is assumed to be an error and
1866 * the device is ignored.
1869 scsih_slave_alloc(struct scsi_device *sdev)
1871 struct Scsi_Host *shost;
1872 struct MPT3SAS_ADAPTER *ioc;
1873 struct MPT3SAS_TARGET *sas_target_priv_data;
1874 struct MPT3SAS_DEVICE *sas_device_priv_data;
1875 struct scsi_target *starget;
1876 struct _raid_device *raid_device;
1877 struct _sas_device *sas_device;
1878 struct _pcie_device *pcie_device;
1879 unsigned long flags;
1881 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1883 if (!sas_device_priv_data)
1886 sas_device_priv_data->lun = sdev->lun;
1887 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1889 starget = scsi_target(sdev);
1890 sas_target_priv_data = starget->hostdata;
1891 sas_target_priv_data->num_luns++;
1892 sas_device_priv_data->sas_target = sas_target_priv_data;
1893 sdev->hostdata = sas_device_priv_data;
1894 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1895 sdev->no_uld_attach = 1;
1897 shost = dev_to_shost(&starget->dev);
1898 ioc = shost_priv(shost);
1899 if (starget->channel == RAID_CHANNEL) {
1900 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1901 raid_device = _scsih_raid_device_find_by_id(ioc,
1902 starget->id, starget->channel);
1904 raid_device->sdev = sdev; /* raid is single lun */
1905 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1907 if (starget->channel == PCIE_CHANNEL) {
1908 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1910 sas_target_priv_data->sas_address);
1911 if (pcie_device && (pcie_device->starget == NULL)) {
1912 sdev_printk(KERN_INFO, sdev,
1913 "%s : pcie_device->starget set to starget @ %d\n",
1914 __func__, __LINE__);
1915 pcie_device->starget = starget;
1919 pcie_device_put(pcie_device);
1920 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1922 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1923 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1924 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1925 sas_target_priv_data->sas_address);
1926 if (sas_device && (sas_device->starget == NULL)) {
1927 sdev_printk(KERN_INFO, sdev,
1928 "%s : sas_device->starget set to starget @ %d\n",
1929 __func__, __LINE__);
1930 sas_device->starget = starget;
1934 sas_device_put(sas_device);
1936 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1943 * scsih_slave_destroy - device destroy routine
1944 * @sdev: scsi device struct
1947 scsih_slave_destroy(struct scsi_device *sdev)
1949 struct MPT3SAS_TARGET *sas_target_priv_data;
1950 struct scsi_target *starget;
1951 struct Scsi_Host *shost;
1952 struct MPT3SAS_ADAPTER *ioc;
1953 struct _sas_device *sas_device;
1954 struct _pcie_device *pcie_device;
1955 unsigned long flags;
1957 if (!sdev->hostdata)
1960 starget = scsi_target(sdev);
1961 sas_target_priv_data = starget->hostdata;
1962 sas_target_priv_data->num_luns--;
1964 shost = dev_to_shost(&starget->dev);
1965 ioc = shost_priv(shost);
1967 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1968 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1969 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1970 sas_target_priv_data);
1971 if (pcie_device && !sas_target_priv_data->num_luns)
1972 pcie_device->starget = NULL;
1975 pcie_device_put(pcie_device);
1977 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1979 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1980 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1981 sas_device = __mpt3sas_get_sdev_from_target(ioc,
1982 sas_target_priv_data);
1983 if (sas_device && !sas_target_priv_data->num_luns)
1984 sas_device->starget = NULL;
1987 sas_device_put(sas_device);
1988 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1991 kfree(sdev->hostdata);
1992 sdev->hostdata = NULL;
1996 * _scsih_display_sata_capabilities - sata capabilities
1997 * @ioc: per adapter object
1998 * @handle: device handle
1999 * @sdev: scsi device struct
2002 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2003 u16 handle, struct scsi_device *sdev)
2005 Mpi2ConfigReply_t mpi_reply;
2006 Mpi2SasDevicePage0_t sas_device_pg0;
2011 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2012 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2013 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2014 __FILE__, __LINE__, __func__);
2018 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2019 MPI2_IOCSTATUS_MASK;
2020 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2021 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2022 __FILE__, __LINE__, __func__);
2026 flags = le16_to_cpu(sas_device_pg0.Flags);
2027 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2029 sdev_printk(KERN_INFO, sdev,
2030 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2031 "sw_preserve(%s)\n",
2032 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2033 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2034 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2036 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2037 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2038 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2042 * raid transport support -
2043 * Enabled for SLES11 and newer; in older kernels the driver will panic when
2044 * it is unloaded and then reloaded - I believe that the subroutine
2045 * raid_class_release() is not cleaning up properly.
2049 * scsih_is_raid - return boolean indicating device is raid volume
2050 * @dev: the device struct object
2053 scsih_is_raid(struct device *dev)
2055 struct scsi_device *sdev = to_scsi_device(dev);
2056 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2058 if (ioc->is_warpdrive)
2060 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2064 scsih_is_nvme(struct device *dev)
2066 struct scsi_device *sdev = to_scsi_device(dev);
2068 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2072 * scsih_get_resync - get raid volume resync percent complete
2073 * @dev: the device struct object
2076 scsih_get_resync(struct device *dev)
2078 struct scsi_device *sdev = to_scsi_device(dev);
2079 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2080 static struct _raid_device *raid_device;
2081 unsigned long flags;
2082 Mpi2RaidVolPage0_t vol_pg0;
2083 Mpi2ConfigReply_t mpi_reply;
2084 u32 volume_status_flags;
2085 u8 percent_complete;
2088 percent_complete = 0;
2090 if (ioc->is_warpdrive)
2093 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2094 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2097 handle = raid_device->handle;
2098 percent_complete = raid_device->percent_complete;
2100 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2105 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2106 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2107 sizeof(Mpi2RaidVolPage0_t))) {
2108 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2109 __FILE__, __LINE__, __func__);
2110 percent_complete = 0;
2114 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2115 if (!(volume_status_flags &
2116 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2117 percent_complete = 0;
2121 switch (ioc->hba_mpi_version_belonged) {
2123 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2127 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2133 * scsih_get_state - get raid volume state
2134 * @dev: the device struct object
2137 scsih_get_state(struct device *dev)
2139 struct scsi_device *sdev = to_scsi_device(dev);
2140 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2141 static struct _raid_device *raid_device;
2142 unsigned long flags;
2143 Mpi2RaidVolPage0_t vol_pg0;
2144 Mpi2ConfigReply_t mpi_reply;
2146 enum raid_state state = RAID_STATE_UNKNOWN;
2149 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2150 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2153 handle = raid_device->handle;
2154 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2159 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2160 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2161 sizeof(Mpi2RaidVolPage0_t))) {
2162 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2163 __FILE__, __LINE__, __func__);
2167 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2168 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2169 state = RAID_STATE_RESYNCING;
2173 switch (vol_pg0.VolumeState) {
2174 case MPI2_RAID_VOL_STATE_OPTIMAL:
2175 case MPI2_RAID_VOL_STATE_ONLINE:
2176 state = RAID_STATE_ACTIVE;
2178 case MPI2_RAID_VOL_STATE_DEGRADED:
2179 state = RAID_STATE_DEGRADED;
2181 case MPI2_RAID_VOL_STATE_FAILED:
2182 case MPI2_RAID_VOL_STATE_MISSING:
2183 state = RAID_STATE_OFFLINE;
2187 switch (ioc->hba_mpi_version_belonged) {
2189 raid_set_state(mpt2sas_raid_template, dev, state);
2193 raid_set_state(mpt3sas_raid_template, dev, state);
2199 * _scsih_set_level - set raid level
2201 * @sdev: scsi device struct
2202 * @volume_type: volume type
2205 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2206 struct scsi_device *sdev, u8 volume_type)
2208 enum raid_level level = RAID_LEVEL_UNKNOWN;
2210 switch (volume_type) {
2211 case MPI2_RAID_VOL_TYPE_RAID0:
2212 level = RAID_LEVEL_0;
2214 case MPI2_RAID_VOL_TYPE_RAID10:
2215 level = RAID_LEVEL_10;
2217 case MPI2_RAID_VOL_TYPE_RAID1E:
2218 level = RAID_LEVEL_1E;
2220 case MPI2_RAID_VOL_TYPE_RAID1:
2221 level = RAID_LEVEL_1;
2225 switch (ioc->hba_mpi_version_belonged) {
2227 raid_set_level(mpt2sas_raid_template,
2228 &sdev->sdev_gendev, level);
2232 raid_set_level(mpt3sas_raid_template,
2233 &sdev->sdev_gendev, level);
2240 * _scsih_get_volume_capabilities - volume capabilities
2241 * @ioc: per adapter object
2242 * @raid_device: the raid_device object
2244 * Return: 0 for success, else 1
2247 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2248 struct _raid_device *raid_device)
2250 Mpi2RaidVolPage0_t *vol_pg0;
2251 Mpi2RaidPhysDiskPage0_t pd_pg0;
2252 Mpi2SasDevicePage0_t sas_device_pg0;
2253 Mpi2ConfigReply_t mpi_reply;
2257 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2258 &num_pds)) || !num_pds) {
2260 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2261 __FILE__, __LINE__, __func__));
2265 raid_device->num_pds = num_pds;
2266 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2267 sizeof(Mpi2RaidVol0PhysDisk_t));
2268 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2271 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2272 __FILE__, __LINE__, __func__));
2276 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2277 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2279 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2280 __FILE__, __LINE__, __func__));
2285 raid_device->volume_type = vol_pg0->VolumeType;
2287 /* figure out what the underlying devices are by
2288 * obtaining the device_info bits for the 1st device
2290 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2291 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2292 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2293 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2294 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2295 le16_to_cpu(pd_pg0.DevHandle)))) {
2296 raid_device->device_info =
2297 le32_to_cpu(sas_device_pg0.DeviceInfo);
2306 * _scsih_enable_tlr - setting TLR flags
2307 * @ioc: per adapter object
2308 * @sdev: scsi device struct
2310 * Enabling Transaction Layer Retries for tape devices when
2311 * vpd page 0x90 is present
2315 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2319 if (sdev->type != TYPE_TAPE)
2322 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2325 sas_enable_tlr(sdev);
2326 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2327 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2333 * scsih_slave_configure - device configure routine.
2334 * @sdev: scsi device struct
2336 * Return: 0 if ok. Any other return is assumed to be an error and
2337 * the device is ignored.
2340 scsih_slave_configure(struct scsi_device *sdev)
2342 struct Scsi_Host *shost = sdev->host;
2343 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2344 struct MPT3SAS_DEVICE *sas_device_priv_data;
2345 struct MPT3SAS_TARGET *sas_target_priv_data;
2346 struct _sas_device *sas_device;
2347 struct _pcie_device *pcie_device;
2348 struct _raid_device *raid_device;
2349 unsigned long flags;
2354 u16 handle, volume_handle = 0;
2355 u64 volume_wwid = 0;
2358 sas_device_priv_data = sdev->hostdata;
2359 sas_device_priv_data->configured_lun = 1;
2360 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2361 sas_target_priv_data = sas_device_priv_data->sas_target;
2362 handle = sas_target_priv_data->handle;
2364 /* raid volume handling */
2365 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2367 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2368 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2369 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2372 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2373 __FILE__, __LINE__, __func__));
2377 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2379 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2380 __FILE__, __LINE__, __func__));
2385 * WARPDRIVE: Initialize the required data for Direct IO
2387 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2389 /* RAID Queue Depth Support
2390 * IS volume = underlying qdepth of drive type, either
2391 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2392 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2394 if (raid_device->device_info &
2395 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2396 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2399 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2400 if (raid_device->device_info &
2401 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2407 switch (raid_device->volume_type) {
2408 case MPI2_RAID_VOL_TYPE_RAID0:
2411 case MPI2_RAID_VOL_TYPE_RAID1E:
2412 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2413 if (ioc->manu_pg10.OEMIdentifier &&
2414 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2415 MFG10_GF0_R10_DISPLAY) &&
2416 !(raid_device->num_pds % 2))
2421 case MPI2_RAID_VOL_TYPE_RAID1:
2422 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2425 case MPI2_RAID_VOL_TYPE_RAID10:
2426 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2429 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2431 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2436 if (!ioc->hide_ir_msg)
2437 sdev_printk(KERN_INFO, sdev,
2438 "%s: handle(0x%04x), wwid(0x%016llx),"
2439 " pd_count(%d), type(%s)\n",
2440 r_level, raid_device->handle,
2441 (unsigned long long)raid_device->wwid,
2442 raid_device->num_pds, ds);
2444 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2445 blk_queue_max_hw_sectors(sdev->request_queue,
2446 MPT3SAS_RAID_MAX_SECTORS);
2447 sdev_printk(KERN_INFO, sdev,
2448 "Set queue's max_sector to: %u\n",
2449 MPT3SAS_RAID_MAX_SECTORS);
2452 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2454 /* raid transport support */
2455 if (!ioc->is_warpdrive)
2456 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2460 /* non-raid handling */
2461 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2462 if (mpt3sas_config_get_volume_handle(ioc, handle,
2465 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2466 __FILE__, __LINE__, __func__));
2469 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2470 volume_handle, &volume_wwid)) {
2472 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2473 __FILE__, __LINE__, __func__));
2479 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2480 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2481 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2482 sas_device_priv_data->sas_target->sas_address);
2484 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2486 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2487 __FILE__, __LINE__, __func__));
2491 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2493 sdev_printk(KERN_INFO, sdev,
2494 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2495 ds, handle, (unsigned long long)pcie_device->wwid,
2496 pcie_device->port_num);
2497 if (pcie_device->enclosure_handle != 0)
2498 sdev_printk(KERN_INFO, sdev,
2499 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2501 (unsigned long long)pcie_device->enclosure_logical_id,
2503 if (pcie_device->connector_name[0] != '\0')
2504 sdev_printk(KERN_INFO, sdev,
2505 "%s: enclosure level(0x%04x),"
2506 "connector name( %s)\n", ds,
2507 pcie_device->enclosure_level,
2508 pcie_device->connector_name);
2510 if (pcie_device->nvme_mdts)
2511 blk_queue_max_hw_sectors(sdev->request_queue,
2512 pcie_device->nvme_mdts/512);
2514 pcie_device_put(pcie_device);
2515 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2516 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2517 /* Enable the QUEUE_FLAG_NOMERGES flag, so that IOs won't be
2518 ** merged, avoiding the holes that merging can create
2521 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2522 sdev->request_queue);
2523 blk_queue_virt_boundary(sdev->request_queue,
2524 ioc->page_size - 1);
2528 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2529 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2530 sas_device_priv_data->sas_target->sas_address);
2532 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2534 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2535 __FILE__, __LINE__, __func__));
2539 sas_device->volume_handle = volume_handle;
2540 sas_device->volume_wwid = volume_wwid;
2541 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2542 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2544 if (sas_device->device_info &
2545 MPI2_SAS_DEVICE_INFO_SEP) {
2546 sdev_printk(KERN_WARNING, sdev,
2547 "set ignore_delay_remove for handle(0x%04x)\n",
2548 sas_device_priv_data->sas_target->handle);
2549 sas_device_priv_data->ignore_delay_remove = 1;
2554 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2555 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2557 else if (sas_device->device_info &
2558 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2562 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2563 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2564 ds, handle, (unsigned long long)sas_device->sas_address,
2565 sas_device->phy, (unsigned long long)sas_device->device_name);
2567 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2569 sas_device_put(sas_device);
2570 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2573 _scsih_display_sata_capabilities(ioc, handle, sdev);
2576 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2579 sas_read_port_mode_page(sdev);
2580 _scsih_enable_tlr(ioc, sdev);
2587 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2588 * @sdev: scsi device struct
2589 * @bdev: pointer to block device context
2590 * @capacity: device size (in 512 byte sectors)
2591 * @params: three element array to place output:
2592 * params[0] number of heads (max 255)
2593 * params[1] number of sectors (max 63)
2594 * params[2] number of cylinders
2597 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2598 sector_t capacity, int params[])
2608 dummy = heads * sectors;
2609 cylinders = capacity;
2610 sector_div(cylinders, dummy);
2613 * Handle extended translation size for logical drives
2616 if ((ulong)capacity >= 0x200000) {
2619 dummy = heads * sectors;
2620 cylinders = capacity;
2621 sector_div(cylinders, dummy);
2626 params[1] = sectors;
2627 params[2] = cylinders;
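/*
 * Worked example (illustrative only, not part of the driver): the geometry
 * translation above, assuming the conventional 64 heads / 32 sectors default
 * and the 255/63 extended translation for large capacities.  The exact
 * initial values used by the driver are not reproduced here, so treat the
 * numbers below as assumptions.
 */
#if 0
static void example_bios_geometry(sector_t capacity)
{
	int heads = 64, sectors = 32;	/* assumed defaults */
	ulong dummy;
	sector_t cylinders;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	if ((ulong)capacity >= 0x200000) {	/* extended translation */
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}
	/* e.g. capacity = 0xFFFFFFFF 512-byte sectors (~2 TiB) gives
	 * heads = 255, sectors = 63, cylinders = 267349
	 */
}
#endif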
2633 * _scsih_response_code - translation of device response code
2634 * @ioc: per adapter object
2635 * @response_code: response code returned by the device
2638 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2642 switch (response_code) {
2643 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2644 desc = "task management request completed";
2646 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2647 desc = "invalid frame";
2649 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2650 desc = "task management request not supported";
2652 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2653 desc = "task management request failed";
2655 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2656 desc = "task management request succeeded";
2658 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2659 desc = "invalid lun";
2662 desc = "overlapped tag attempted";
2664 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2665 desc = "task queued, however not sent to target";
2671 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2675 * _scsih_tm_done - tm completion routine
2676 * @ioc: per adapter object
2677 * @smid: system request message index
2678 * @msix_index: MSIX table index supplied by the OS
2679 * @reply: reply message frame(lower 32bit addr)
2682 * The callback handler when using scsih_issue_tm.
2684 * Return: 1 meaning mf should be freed from _base_interrupt
2685 * 0 means the mf is freed from this function.
2688 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2690 MPI2DefaultReply_t *mpi_reply;
2692 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2694 if (ioc->tm_cmds.smid != smid)
2696 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2697 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2699 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2700 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2702 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2703 complete(&ioc->tm_cmds.done);
2708 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2709 * @ioc: per adapter object
2710 * @handle: device handle
2712 * During a task management request, we need to freeze the device queue.
2715 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2717 struct MPT3SAS_DEVICE *sas_device_priv_data;
2718 struct scsi_device *sdev;
2721 shost_for_each_device(sdev, ioc->shost) {
2724 sas_device_priv_data = sdev->hostdata;
2725 if (!sas_device_priv_data)
2727 if (sas_device_priv_data->sas_target->handle == handle) {
2728 sas_device_priv_data->sas_target->tm_busy = 1;
2730 ioc->ignore_loginfos = 1;
2736 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2737 * @ioc: per adapter object
2738 * @handle: device handle
2740 * During a task management request, we need to freeze the device queue.
2743 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2745 struct MPT3SAS_DEVICE *sas_device_priv_data;
2746 struct scsi_device *sdev;
2749 shost_for_each_device(sdev, ioc->shost) {
2752 sas_device_priv_data = sdev->hostdata;
2753 if (!sas_device_priv_data)
2755 if (sas_device_priv_data->sas_target->handle == handle) {
2756 sas_device_priv_data->sas_target->tm_busy = 0;
2758 ioc->ignore_loginfos = 0;
2764 * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2765 * @ioc: per adapter object
2766 * @channel: the channel assigned by the OS
2767 * @id: the id assigned by the OS
2769 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2770 * @smid_task: smid assigned to the task
2772 * Check whether the TM has aborted the timed out SCSI command; if it
2773 * has aborted the IO then return SUCCESS, else return FAILED.
2776 scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2777 uint id, uint lun, u8 type, u16 smid_task)
2780 if (smid_task <= ioc->shost->can_queue) {
2782 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2783 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2787 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2788 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2789 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2796 } else if (smid_task == ioc->scsih_cmds.smid) {
2797 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2798 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2800 } else if (smid_task == ioc->ctl_cmds.smid) {
2801 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2802 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2810 * scsih_tm_post_processing - post processing of target & LUN reset
2811 * @ioc: per adapter object
2812 * @handle: device handle
2813 * @channel: the channel assigned by the OS
2814 * @id: the id assigned by the OS
2816 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2817 * @smid_task: smid assigned to the task
2819 * Post processing of target & LUN reset. Due to interrupt latency
2820 * issues it is possible that the interrupt for the aborted IO has not
2821 * been received yet. So before returning a failure status, poll the
2822 * reply descriptor pools for the reply of the timed out SCSI command.
2823 * Return FAILED if the reply for the timed out IO is not received,
2824 * otherwise return SUCCESS.
2827 scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2828 uint channel, uint id, uint lun, u8 type, u16 smid_task)
2832 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2837 "Poll ReplyDescriptor queues for completion of"
2838 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
2839 smid_task, type, handle);
2842 * Due to interrupt latency issues, the driver may receive the interrupt
2843 * for the TM first and then for the aborted SCSI IO command. So poll all
2844 * the ReplyDescriptor pools before returning the FAILED status to the SML.
2846 mpt3sas_base_mask_interrupts(ioc);
2847 mpt3sas_base_sync_reply_irqs(ioc, 1);
2848 mpt3sas_base_unmask_interrupts(ioc);
2850 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
2854 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2855 * @ioc: per adapter struct
2856 * @handle: device handle
2857 * @channel: the channel assigned by the OS
2858 * @id: the id assigned by the OS
2860 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2861 * @smid_task: smid assigned to the task
2862 * @msix_task: MSIX table index supplied by the OS
2863 * @timeout: timeout in seconds
2864 * @tr_method: Target Reset Method
2867 * A generic API for sending task management requests to firmware.
2869 * The callback index is set inside `ioc->tm_cb_idx`.
2870 * The caller is responsible for checking for outstanding commands.
2872 * Return: SUCCESS or FAILED.
2875 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2876 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
2877 u8 timeout, u8 tr_method)
2879 Mpi2SCSITaskManagementRequest_t *mpi_request;
2880 Mpi2SCSITaskManagementReply_t *mpi_reply;
2881 Mpi25SCSIIORequest_t *request;
2887 lockdep_assert_held(&ioc->tm_cmds.mutex);
2889 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2890 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2894 if (ioc->shost_recovery || ioc->remove_host ||
2895 ioc->pci_error_recovery) {
2896 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2900 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2901 if (ioc_state & MPI2_DOORBELL_USED) {
2902 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2903 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2904 return (!rc) ? SUCCESS : FAILED;
2907 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2908 mpt3sas_print_fault_code(ioc, ioc_state &
2909 MPI2_DOORBELL_DATA_MASK);
2910 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2911 return (!rc) ? SUCCESS : FAILED;
2912 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
2913 MPI2_IOC_STATE_COREDUMP) {
2914 mpt3sas_print_coredump_info(ioc, ioc_state &
2915 MPI2_DOORBELL_DATA_MASK);
2916 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2917 return (!rc) ? SUCCESS : FAILED;
2920 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2922 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2927 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2928 handle, type, smid_task, timeout, tr_method));
2929 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2930 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2931 ioc->tm_cmds.smid = smid;
2932 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2933 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2934 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2935 mpi_request->DevHandle = cpu_to_le16(handle);
2936 mpi_request->TaskType = type;
2937 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
2938 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
2939 mpi_request->MsgFlags = tr_method;
2940 mpi_request->TaskMID = cpu_to_le16(smid_task);
2941 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2942 mpt3sas_scsih_set_tm_flag(ioc, handle);
2943 init_completion(&ioc->tm_cmds.done);
2944 ioc->put_smid_hi_priority(ioc, smid, msix_task);
2945 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2946 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2947 mpt3sas_check_cmd_timeout(ioc,
2948 ioc->tm_cmds.status, mpi_request,
2949 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
2951 rc = mpt3sas_base_hard_reset_handler(ioc,
2953 rc = (!rc) ? SUCCESS : FAILED;
2958 /* sync IRQs in case those were busy during flush. */
2959 mpt3sas_base_sync_reply_irqs(ioc, 0);
2961 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2962 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2963 mpi_reply = ioc->tm_cmds.reply;
2965 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2966 le16_to_cpu(mpi_reply->IOCStatus),
2967 le32_to_cpu(mpi_reply->IOCLogInfo),
2968 le32_to_cpu(mpi_reply->TerminationCount)));
2969 if (ioc->logging_level & MPT_DEBUG_TM) {
2970 _scsih_response_code(ioc, mpi_reply->ResponseCode);
2971 if (mpi_reply->IOCStatus)
2972 _debug_dump_mf(mpi_request,
2973 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2978 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
2981 * If the DevHandle field in smid_task's entry of the request pool
2982 * doesn't match the device handle on which this task abort TM was
2983 * issued, then the TM has successfully aborted the timed out
2984 * command, since smid_task's entry in the request pool is memset
2985 * to zero once the timed out command is returned to the SML.
2986 * If the command was not aborted, smid_task's entry won't be
2987 * cleared; it will still carry the same DevHandle on which this
2988 * task abort TM was issued and the driver will return the TM
2989 * status as FAILED.
2991 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
2992 if (le16_to_cpu(request->DevHandle) != handle)
2995 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
2996 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
2997 handle, timeout, tr_method, smid_task, msix_task);
3001 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3002 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3003 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3004 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3007 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3016 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3017 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3021 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3022 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3023 u16 msix_task, u8 timeout, u8 tr_method)
3027 mutex_lock(&ioc->tm_cmds.mutex);
3028 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3029 smid_task, msix_task, timeout, tr_method);
3030 mutex_unlock(&ioc->tm_cmds.mutex);
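/*
 * Usage sketch (illustrative only, not part of the driver): how an error
 * handling path might request a LUN reset through the locked helper above.
 * The 30 second timeout and the scmd/handle plumbing are assumptions for
 * demonstration; the real callers (scsih_abort, scsih_dev_reset, ...) follow
 * later in this file.
 */
#if 0
static int example_lun_reset(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 handle)
{
	return mpt3sas_scsih_issue_locked_tm(ioc, handle,
	    scmd->device->channel, scmd->device->id, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
	    0 /* smid_task */, 0 /* msix_task */,
	    30 /* timeout in seconds */,
	    MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
}
#endif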
3036 * _scsih_tm_display_info - displays info about the device
3037 * @ioc: per adapter struct
3038 * @scmd: pointer to scsi command object
3040 * Called by task management callback handlers.
3043 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3045 struct scsi_target *starget = scmd->device->sdev_target;
3046 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3047 struct _sas_device *sas_device = NULL;
3048 struct _pcie_device *pcie_device = NULL;
3049 unsigned long flags;
3050 char *device_str = NULL;
3054 if (ioc->hide_ir_msg)
3055 device_str = "WarpDrive";
3057 device_str = "volume";
3059 scsi_print_command(scmd);
3060 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3061 starget_printk(KERN_INFO, starget,
3062 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3063 device_str, priv_target->handle,
3064 device_str, (unsigned long long)priv_target->sas_address);
3066 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3067 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3068 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3070 starget_printk(KERN_INFO, starget,
3071 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3072 pcie_device->handle,
3073 (unsigned long long)pcie_device->wwid,
3074 pcie_device->port_num);
3075 if (pcie_device->enclosure_handle != 0)
3076 starget_printk(KERN_INFO, starget,
3077 "enclosure logical id(0x%016llx), slot(%d)\n",
3078 (unsigned long long)
3079 pcie_device->enclosure_logical_id,
3081 if (pcie_device->connector_name[0] != '\0')
3082 starget_printk(KERN_INFO, starget,
3083 "enclosure level(0x%04x), connector name( %s)\n",
3084 pcie_device->enclosure_level,
3085 pcie_device->connector_name);
3086 pcie_device_put(pcie_device);
3088 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3091 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3092 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3094 if (priv_target->flags &
3095 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3096 starget_printk(KERN_INFO, starget,
3097 "volume handle(0x%04x), "
3098 "volume wwid(0x%016llx)\n",
3099 sas_device->volume_handle,
3100 (unsigned long long)sas_device->volume_wwid);
3102 starget_printk(KERN_INFO, starget,
3103 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3105 (unsigned long long)sas_device->sas_address,
3108 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3111 sas_device_put(sas_device);
3113 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3118 * scsih_abort - eh threads main abort routine
3119 * @scmd: pointer to scsi command object
3121 * Return: SUCCESS if command aborted else FAILED
3124 scsih_abort(struct scsi_cmnd *scmd)
3126 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3127 struct MPT3SAS_DEVICE *sas_device_priv_data;
3128 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3133 struct _pcie_device *pcie_device = NULL;
3134 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3135 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3136 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3137 (scmd->request->timeout / HZ) * 1000);
3138 _scsih_tm_display_info(ioc, scmd);
3140 sas_device_priv_data = scmd->device->hostdata;
3141 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3143 sdev_printk(KERN_INFO, scmd->device,
3144 "device been deleted! scmd(0x%p)\n", scmd);
3145 scmd->result = DID_NO_CONNECT << 16;
3146 scmd->scsi_done(scmd);
3151 /* check for completed command */
3152 if (st == NULL || st->cb_idx == 0xFF) {
3153 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3154 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3155 scmd->result = DID_RESET << 16;
3160 /* for hidden raid components and volumes this is not supported */
3161 if (sas_device_priv_data->sas_target->flags &
3162 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3163 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3164 scmd->result = DID_RESET << 16;
3169 mpt3sas_halt_firmware(ioc);
3171 handle = sas_device_priv_data->sas_target->handle;
3172 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3173 if (pcie_device && (!ioc->tm_custom_handling) &&
3174 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3175 timeout = ioc->nvme_abort_timeout;
3176 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3177 scmd->device->id, scmd->device->lun,
3178 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3179 st->smid, st->msix_io, timeout, 0);
3180 /* Command must be cleared after abort */
3181 if (r == SUCCESS && st->cb_idx != 0xFF)
3184 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3185 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3187 pcie_device_put(pcie_device);
3192 * scsih_dev_reset - eh threads main device reset routine
3193 * @scmd: pointer to scsi command object
3195 * Return: SUCCESS if command aborted else FAILED
3198 scsih_dev_reset(struct scsi_cmnd *scmd)
3200 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3201 struct MPT3SAS_DEVICE *sas_device_priv_data;
3202 struct _sas_device *sas_device = NULL;
3203 struct _pcie_device *pcie_device = NULL;
3209 struct scsi_target *starget = scmd->device->sdev_target;
3210 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3212 sdev_printk(KERN_INFO, scmd->device,
3213 "attempting device reset! scmd(0x%p)\n", scmd);
3214 _scsih_tm_display_info(ioc, scmd);
3216 sas_device_priv_data = scmd->device->hostdata;
3217 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3219 sdev_printk(KERN_INFO, scmd->device,
3220 "device been deleted! scmd(0x%p)\n", scmd);
3221 scmd->result = DID_NO_CONNECT << 16;
3222 scmd->scsi_done(scmd);
3227 /* for hidden raid components obtain the volume_handle */
3229 if (sas_device_priv_data->sas_target->flags &
3230 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3231 sas_device = mpt3sas_get_sdev_from_target(ioc,
3234 handle = sas_device->volume_handle;
3236 handle = sas_device_priv_data->sas_target->handle;
3239 scmd->result = DID_RESET << 16;
3244 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3246 if (pcie_device && (!ioc->tm_custom_handling) &&
3247 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3248 tr_timeout = pcie_device->reset_timeout;
3249 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3251 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3253 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3254 scmd->device->id, scmd->device->lun,
3255 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3256 tr_timeout, tr_method);
3257 /* Check for busy commands after reset */
3258 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
3261 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3262 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3265 sas_device_put(sas_device);
3267 pcie_device_put(pcie_device);
3273 * scsih_target_reset - eh threads main target reset routine
3274 * @scmd: pointer to scsi command object
3276 * Return: SUCCESS if command aborted else FAILED
3279 scsih_target_reset(struct scsi_cmnd *scmd)
3281 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3282 struct MPT3SAS_DEVICE *sas_device_priv_data;
3283 struct _sas_device *sas_device = NULL;
3284 struct _pcie_device *pcie_device = NULL;
3289 struct scsi_target *starget = scmd->device->sdev_target;
3290 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3292 starget_printk(KERN_INFO, starget,
3293 "attempting target reset! scmd(0x%p)\n", scmd);
3294 _scsih_tm_display_info(ioc, scmd);
3296 sas_device_priv_data = scmd->device->hostdata;
3297 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3299 starget_printk(KERN_INFO, starget,
3300 "target been deleted! scmd(0x%p)\n", scmd);
3301 scmd->result = DID_NO_CONNECT << 16;
3302 scmd->scsi_done(scmd);
3307 /* for hidden raid components obtain the volume_handle */
3309 if (sas_device_priv_data->sas_target->flags &
3310 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3311 sas_device = mpt3sas_get_sdev_from_target(ioc,
3314 handle = sas_device->volume_handle;
3316 handle = sas_device_priv_data->sas_target->handle;
3319 scmd->result = DID_RESET << 16;
3324 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3326 if (pcie_device && (!ioc->tm_custom_handling) &&
3327 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3328 tr_timeout = pcie_device->reset_timeout;
3329 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3331 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3332 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3333 scmd->device->id, 0,
3334 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3335 tr_timeout, tr_method);
3336 /* Check for busy commands after reset */
3337 if (r == SUCCESS && atomic_read(&starget->target_busy))
3340 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3341 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3344 sas_device_put(sas_device);
3346 pcie_device_put(pcie_device);
3352 * scsih_host_reset - eh threads main host reset routine
3353 * @scmd: pointer to scsi command object
3355 * Return: SUCCESS if command aborted else FAILED
3358 scsih_host_reset(struct scsi_cmnd *scmd)
3360 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3363 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3364 scsi_print_command(scmd);
3366 if (ioc->is_driver_loading || ioc->remove_host) {
3367 ioc_info(ioc, "Blocking the host reset\n");
3372 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3373 r = (retval < 0) ? FAILED : SUCCESS;
3375 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3376 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3382 * _scsih_fw_event_add - insert and queue up fw_event
3383 * @ioc: per adapter object
3384 * @fw_event: object describing the event
3385 * Context: This function will acquire ioc->fw_event_lock.
3387 * This adds the firmware event object to the link list, then queues it up to
3388 * be processed from user context.
3391 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3393 unsigned long flags;
3395 if (ioc->firmware_event_thread == NULL)
3398 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3399 fw_event_work_get(fw_event);
3400 INIT_LIST_HEAD(&fw_event->list);
3401 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3402 INIT_WORK(&fw_event->work, _firmware_event_work);
3403 fw_event_work_get(fw_event);
3404 queue_work(ioc->firmware_event_thread, &fw_event->work);
3405 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3409 * _scsih_fw_event_del_from_list - delete fw_event from the list
3410 * @ioc: per adapter object
3411 * @fw_event: object describing the event
3412 * Context: This function will acquire ioc->fw_event_lock.
3414 * If the fw_event is on the fw_event_list, remove it and do a put.
3417 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3420 unsigned long flags;
3422 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3423 if (!list_empty(&fw_event->list)) {
3424 list_del_init(&fw_event->list);
3425 fw_event_work_put(fw_event);
3427 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3432 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3433 * @ioc: per adapter object
3434 * @event_data: trigger event data
3437 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3438 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3440 struct fw_event_work *fw_event;
3443 if (ioc->is_driver_loading)
3445 sz = sizeof(*event_data);
3446 fw_event = alloc_fw_event_work(sz);
3449 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3450 fw_event->ioc = ioc;
3451 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3452 _scsih_fw_event_add(ioc, fw_event);
3453 fw_event_work_put(fw_event);
3457 * _scsih_error_recovery_delete_devices - remove devices not responding
3458 * @ioc: per adapter object
3461 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3463 struct fw_event_work *fw_event;
3465 if (ioc->is_driver_loading)
3467 fw_event = alloc_fw_event_work(0);
3470 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3471 fw_event->ioc = ioc;
3472 _scsih_fw_event_add(ioc, fw_event);
3473 fw_event_work_put(fw_event);
3477 * mpt3sas_port_enable_complete - port enable completed (fake event)
3478 * @ioc: per adapter object
3481 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3483 struct fw_event_work *fw_event;
3485 fw_event = alloc_fw_event_work(0);
3488 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3489 fw_event->ioc = ioc;
3490 _scsih_fw_event_add(ioc, fw_event);
3491 fw_event_work_put(fw_event);
3494 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3496 unsigned long flags;
3497 struct fw_event_work *fw_event = NULL;
3499 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3500 if (!list_empty(&ioc->fw_event_list)) {
3501 fw_event = list_first_entry(&ioc->fw_event_list,
3502 struct fw_event_work, list);
3503 list_del_init(&fw_event->list);
3505 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3511 * _scsih_fw_event_cleanup_queue - cleanup event queue
3512 * @ioc: per adapter object
3514 * Walk the firmware event queue, either killing timers, or waiting
3515 * for outstanding events to complete
3518 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3520 struct fw_event_work *fw_event;
3522 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3523 !ioc->firmware_event_thread || in_interrupt())
3526 ioc->fw_events_cleanup = 1;
3527 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3528 (fw_event = ioc->current_event)) {
3530 * Wait on the fw_event to complete. If this returns 1, then
3531 * the event was never executed, and we need a put for the
3532 * reference the work had on the fw_event.
3534 * If it did execute, we wait for it to finish, and the put will
3535 * happen from _firmware_event_work()
3537 if (cancel_work_sync(&fw_event->work))
3538 fw_event_work_put(fw_event);
3540 fw_event_work_put(fw_event);
3542 ioc->fw_events_cleanup = 0;
3546 * _scsih_internal_device_block - block the sdev device
3547 * @sdev: per device object
3548 * @sas_device_priv_data: per device driver private data
3550 * make sure the device is blocked without error; if not, log a warning
3554 _scsih_internal_device_block(struct scsi_device *sdev,
3555 struct MPT3SAS_DEVICE *sas_device_priv_data)
3559 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3560 sas_device_priv_data->sas_target->handle);
3561 sas_device_priv_data->block = 1;
3563 r = scsi_internal_device_block_nowait(sdev);
3565 sdev_printk(KERN_WARNING, sdev,
3566 "device_block failed with return(%d) for handle(0x%04x)\n",
3567 r, sas_device_priv_data->sas_target->handle);
3571 * _scsih_internal_device_unblock - unblock the sdev device
3572 * @sdev: per device object
3573 * @sas_device_priv_data: per device driver private data
3574 * make sure the device is unblocked without error; if not, retry
3575 * by blocking and then unblocking
3579 _scsih_internal_device_unblock(struct scsi_device *sdev,
3580 struct MPT3SAS_DEVICE *sas_device_priv_data)
3584 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3585 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3586 sas_device_priv_data->block = 0;
3587 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3589 /* The device has been set to SDEV_RUNNING by SD layer during
3590 * device addition but the request queue is still stopped by
3591 * our earlier block call. We need to perform a block again
3592 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3594 sdev_printk(KERN_WARNING, sdev,
3595 "device_unblock failed with return(%d) for handle(0x%04x) "
3596 "performing a block followed by an unblock\n",
3597 r, sas_device_priv_data->sas_target->handle);
3598 sas_device_priv_data->block = 1;
3599 r = scsi_internal_device_block_nowait(sdev);
3601 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3602 "failed with return(%d) for handle(0x%04x)\n",
3603 r, sas_device_priv_data->sas_target->handle);
3605 sas_device_priv_data->block = 0;
3606 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3608 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3609 " failed with return(%d) for handle(0x%04x)\n",
3610 r, sas_device_priv_data->sas_target->handle);
3615 * _scsih_ublock_io_all_device - unblock every device
3616 * @ioc: per adapter object
3618 * change the device state from block to running
3621 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3623 struct MPT3SAS_DEVICE *sas_device_priv_data;
3624 struct scsi_device *sdev;
3626 shost_for_each_device(sdev, ioc->shost) {
3627 sas_device_priv_data = sdev->hostdata;
3628 if (!sas_device_priv_data)
3630 if (!sas_device_priv_data->block)
3633 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3634 "device_running, handle(0x%04x)\n",
3635 sas_device_priv_data->sas_target->handle));
3636 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3642 * _scsih_ublock_io_device - prepare device to be deleted
3643 * @ioc: per adapter object
3644 * @sas_address: sas address
3646 * unblock then put device in offline state
3649 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3651 struct MPT3SAS_DEVICE *sas_device_priv_data;
3652 struct scsi_device *sdev;
3654 shost_for_each_device(sdev, ioc->shost) {
3655 sas_device_priv_data = sdev->hostdata;
3656 if (!sas_device_priv_data)
3658 if (sas_device_priv_data->sas_target->sas_address
3661 if (sas_device_priv_data->block)
3662 _scsih_internal_device_unblock(sdev,
3663 sas_device_priv_data);
3668 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3669 * @ioc: per adapter object
3671 * During device pull we need to appropriately set the sdev state.
3674 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3676 struct MPT3SAS_DEVICE *sas_device_priv_data;
3677 struct scsi_device *sdev;
3679 shost_for_each_device(sdev, ioc->shost) {
3680 sas_device_priv_data = sdev->hostdata;
3681 if (!sas_device_priv_data)
3683 if (sas_device_priv_data->block)
3685 if (sas_device_priv_data->ignore_delay_remove) {
3686 sdev_printk(KERN_INFO, sdev,
3687 "%s skip device_block for SES handle(0x%04x)\n",
3688 __func__, sas_device_priv_data->sas_target->handle);
3691 _scsih_internal_device_block(sdev, sas_device_priv_data);
3696 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3697 * @ioc: per adapter object
3698 * @handle: device handle
3700 * During device pull we need to appropriately set the sdev state.
3703 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3705 struct MPT3SAS_DEVICE *sas_device_priv_data;
3706 struct scsi_device *sdev;
3707 struct _sas_device *sas_device;
3709 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3711 shost_for_each_device(sdev, ioc->shost) {
3712 sas_device_priv_data = sdev->hostdata;
3713 if (!sas_device_priv_data)
3715 if (sas_device_priv_data->sas_target->handle != handle)
3717 if (sas_device_priv_data->block)
3719 if (sas_device && sas_device->pend_sas_rphy_add)
3721 if (sas_device_priv_data->ignore_delay_remove) {
3722 sdev_printk(KERN_INFO, sdev,
3723 "%s skip device_block for SES handle(0x%04x)\n",
3724 __func__, sas_device_priv_data->sas_target->handle);
3727 _scsih_internal_device_block(sdev, sas_device_priv_data);
3731 sas_device_put(sas_device);
3735 * _scsih_block_io_to_children_attached_to_ex
3736 * @ioc: per adapter object
3737 * @sas_expander: the sas_device object
3739 * This routine sets the sdev state to SDEV_BLOCK for all devices attached
3740 * to this expander. This function is called when the expander is pulled.
3744 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3745 struct _sas_node *sas_expander)
3747 struct _sas_port *mpt3sas_port;
3748 struct _sas_device *sas_device;
3749 struct _sas_node *expander_sibling;
3750 unsigned long flags;
3755 list_for_each_entry(mpt3sas_port,
3756 &sas_expander->sas_port_list, port_list) {
3757 if (mpt3sas_port->remote_identify.device_type ==
3759 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3760 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3761 mpt3sas_port->remote_identify.sas_address);
3763 set_bit(sas_device->handle,
3764 ioc->blocking_handles);
3765 sas_device_put(sas_device);
3767 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3771 list_for_each_entry(mpt3sas_port,
3772 &sas_expander->sas_port_list, port_list) {
3774 if (mpt3sas_port->remote_identify.device_type ==
3775 SAS_EDGE_EXPANDER_DEVICE ||
3776 mpt3sas_port->remote_identify.device_type ==
3777 SAS_FANOUT_EXPANDER_DEVICE) {
3779 mpt3sas_scsih_expander_find_by_sas_address(
3780 ioc, mpt3sas_port->remote_identify.sas_address);
3781 _scsih_block_io_to_children_attached_to_ex(ioc,
3788 * _scsih_block_io_to_children_attached_directly
3789 * @ioc: per adapter object
3790 * @event_data: topology change event data
3792 * This routine sets the sdev state to SDEV_BLOCK for all devices
3793 * directly attached during device pull.
3796 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3797 Mpi2EventDataSasTopologyChangeList_t *event_data)
3803 for (i = 0; i < event_data->NumEntries; i++) {
3804 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3807 reason_code = event_data->PHY[i].PhyStatus &
3808 MPI2_EVENT_SAS_TOPO_RC_MASK;
3809 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3810 _scsih_block_io_device(ioc, handle);
3815 * _scsih_block_io_to_pcie_children_attached_directly
3816 * @ioc: per adapter object
3817 * @event_data: topology change event data
3819 * This routine sets the sdev state to SDEV_BLOCK for all devices
3820 * directly attached during device pull/reconnect.
3823 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3824 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3830 for (i = 0; i < event_data->NumEntries; i++) {
3832 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3835 reason_code = event_data->PortEntry[i].PortStatus;
3837 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3838 _scsih_block_io_device(ioc, handle);
3842 * _scsih_tm_tr_send - send task management request
3843 * @ioc: per adapter object
3844 * @handle: device handle
3845 * Context: interrupt time.
3847 * This code initiates the device removal handshake protocol with the
3848 * controller firmware. This function issues a target reset using the
3849 * high priority request queue, and sends a sas iounit control request
3850 * (MPI2_SAS_OP_REMOVE_DEVICE) from its completion handler.
3852 * This is designed to send multiple task management requests at the
3853 * same time to the fifo. If the fifo is full, we append the request
3854 * and process it in a future completion.
3857 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3859 Mpi2SCSITaskManagementRequest_t *mpi_request;
3861 struct _sas_device *sas_device = NULL;
3862 struct _pcie_device *pcie_device = NULL;
3863 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3864 u64 sas_address = 0;
3865 unsigned long flags;
3866 struct _tr_list *delayed_tr;
3870 if (ioc->pci_error_recovery) {
3872 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3876 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3877 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3879 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3884 /* if PD, then return */
3885 if (test_bit(handle, ioc->pd_handles))
3888 clear_bit(handle, ioc->pend_os_device_add);
3890 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3891 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3892 if (sas_device && sas_device->starget &&
3893 sas_device->starget->hostdata) {
3894 sas_target_priv_data = sas_device->starget->hostdata;
3895 sas_target_priv_data->deleted = 1;
3896 sas_address = sas_device->sas_address;
3898 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3900 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3901 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3902 if (pcie_device && pcie_device->starget &&
3903 pcie_device->starget->hostdata) {
3904 sas_target_priv_data = pcie_device->starget->hostdata;
3905 sas_target_priv_data->deleted = 1;
3906 sas_address = pcie_device->wwid;
3908 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3909 if (pcie_device && (!ioc->tm_custom_handling) &&
3910 (!(mpt3sas_scsih_is_pcie_scsi_device(
3911 pcie_device->device_info))))
3913 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3915 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3917 if (sas_target_priv_data) {
3919 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3920 handle, (u64)sas_address));
3922 if (sas_device->enclosure_handle != 0)
3924 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3925 (u64)sas_device->enclosure_logical_id,
3927 if (sas_device->connector_name[0] != '\0')
3929 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3930 sas_device->enclosure_level,
3931 sas_device->connector_name));
3932 } else if (pcie_device) {
3933 if (pcie_device->enclosure_handle != 0)
3935 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3936 (u64)pcie_device->enclosure_logical_id,
3937 pcie_device->slot));
3938 if (pcie_device->connector_name[0] != '\0')
3940 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3941 pcie_device->enclosure_level,
3942 pcie_device->connector_name));
3944 _scsih_ublock_io_device(ioc, sas_address);
3945 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3948 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3950 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3953 INIT_LIST_HEAD(&delayed_tr->list);
3954 delayed_tr->handle = handle;
3955 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3957 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3963 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3964 handle, smid, ioc->tm_tr_cb_idx));
3965 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3966 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3967 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3968 mpi_request->DevHandle = cpu_to_le16(handle);
3969 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3970 mpi_request->MsgFlags = tr_method;
3971 set_bit(handle, ioc->device_remove_in_progress);
3972 ioc->put_smid_hi_priority(ioc, smid, 0);
3973 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3977 sas_device_put(sas_device);
3979 pcie_device_put(pcie_device);
3983 * _scsih_tm_tr_complete - target reset completion routine
3984 * @ioc: per adapter object
3985 * @smid: system request message index
3986 * @msix_index: MSIX table index supplied by the OS
3987 * @reply: reply message frame(lower 32bit addr)
3988 * Context: interrupt time.
3990 * This is the target reset completion routine.
3991 * This code is part of the device removal handshake protocol
3992 * with the controller firmware.
3993 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3995 * Return: 1 meaning mf should be freed from _base_interrupt
3996 * 0 means the mf is freed from this function.
3999 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4003 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4004 Mpi2SCSITaskManagementReply_t *mpi_reply =
4005 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4006 Mpi2SasIoUnitControlRequest_t *mpi_request;
4009 struct _sc_list *delayed_sc;
4011 if (ioc->pci_error_recovery) {
4013 ioc_info(ioc, "%s: host in pci error recovery\n",
4017 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4018 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4020 ioc_info(ioc, "%s: host is not operational\n",
4024 if (unlikely(!mpi_reply)) {
4025 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4026 __FILE__, __LINE__, __func__);
4029 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4030 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4031 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4033 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4035 le16_to_cpu(mpi_reply->DevHandle), smid));
4039 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
4041 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4042 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4043 le32_to_cpu(mpi_reply->IOCLogInfo),
4044 le32_to_cpu(mpi_reply->TerminationCount)));
4046 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
4047 if (!smid_sas_ctrl) {
4048 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
4050 return _scsih_check_for_pending_tm(ioc, smid);
4051 INIT_LIST_HEAD(&delayed_sc->list);
4052 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
4053 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
4055 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
4057 return _scsih_check_for_pending_tm(ioc, smid);
4061 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4062 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
4063 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
4064 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4065 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4066 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4067 mpi_request->DevHandle = mpi_request_tm->DevHandle;
4068 ioc->put_smid_default(ioc, smid_sas_ctrl);
4070 return _scsih_check_for_pending_tm(ioc, smid);
4073 /** _scsih_allow_scmd_to_device - check whether the scmd can be
4074 * issued to the IOC or not.
4075 * @ioc: per adapter object
4076 * @scmd: pointer to scsi command object
4078 * Returns true if scmd can be issued to IOC otherwise returns false.
4080 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4081 struct scsi_cmnd *scmd)
4084 if (ioc->pci_error_recovery)
4087 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4088 if (ioc->remove_host)
4094 if (ioc->remove_host) {
4096 switch (scmd->cmnd[0]) {
4097 case SYNCHRONIZE_CACHE:
4109 * _scsih_sas_control_complete - completion routine
4110 * @ioc: per adapter object
4111 * @smid: system request message index
4112 * @msix_index: MSIX table index supplied by the OS
4113 * @reply: reply message frame(lower 32bit addr)
4114 * Context: interrupt time.
4116 * This is the sas iounit control completion routine.
4117 * This code is part of the code to initiate the device removal
4118 * handshake protocol with controller firmware.
4120 * Return: 1 meaning mf should be freed from _base_interrupt
4121 * 0 means the mf is freed from this function.
4124 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4125 u8 msix_index, u32 reply)
4127 Mpi2SasIoUnitControlReply_t *mpi_reply =
4128 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4130 if (likely(mpi_reply)) {
4132 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4133 le16_to_cpu(mpi_reply->DevHandle), smid,
4134 le16_to_cpu(mpi_reply->IOCStatus),
4135 le32_to_cpu(mpi_reply->IOCLogInfo)));
4136 if (le16_to_cpu(mpi_reply->IOCStatus) ==
4137 MPI2_IOCSTATUS_SUCCESS) {
4138 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4139 ioc->device_remove_in_progress);
4142 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4143 __FILE__, __LINE__, __func__);
4145 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4149 * _scsih_tm_tr_volume_send - send target reset request for volumes
4150 * @ioc: per adapter object
4151 * @handle: device handle
4152 * Context: interrupt time.
4154 * This is designed to send multiple task management requests at the
4155 * same time to the fifo. If the fifo is full, we append the request
4156 * and process it in a future completion.
4159 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4161 Mpi2SCSITaskManagementRequest_t *mpi_request;
4163 struct _tr_list *delayed_tr;
4165 if (ioc->pci_error_recovery) {
4167 ioc_info(ioc, "%s: host reset in progress!\n",
4172 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
4174 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4177 INIT_LIST_HEAD(&delayed_tr->list);
4178 delayed_tr->handle = handle;
4179 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
4181 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4187 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4188 handle, smid, ioc->tm_tr_volume_cb_idx));
4189 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4190 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4191 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4192 mpi_request->DevHandle = cpu_to_le16(handle);
4193 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4194 ioc->put_smid_hi_priority(ioc, smid, 0);
4198 * _scsih_tm_volume_tr_complete - target reset completion
4199 * @ioc: per adapter object
4200 * @smid: system request message index
4201 * @msix_index: MSIX table index supplied by the OS
4202 * @reply: reply message frame(lower 32bit addr)
4203 * Context: interrupt time.
4205 * Return: 1 meaning mf should be freed from _base_interrupt
4206 * 0 means the mf is freed from this function.
4209 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4210 u8 msix_index, u32 reply)
4213 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4214 Mpi2SCSITaskManagementReply_t *mpi_reply =
4215 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4217 if (ioc->shost_recovery || ioc->pci_error_recovery) {
4219 ioc_info(ioc, "%s: host reset in progress!\n",
4223 if (unlikely(!mpi_reply)) {
4224 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4225 __FILE__, __LINE__, __func__);
4229 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
4230 handle = le16_to_cpu(mpi_request_tm->DevHandle);
4231 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
4233 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
4234 handle, le16_to_cpu(mpi_reply->DevHandle),
4240 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
4241 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
4242 le32_to_cpu(mpi_reply->IOCLogInfo),
4243 le32_to_cpu(mpi_reply->TerminationCount)));
4245 return _scsih_check_for_pending_tm(ioc, smid);
4249 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
4250 * @ioc: per adapter object
4251 * @smid: system request message index
4252 * @event: firmware event to be acknowledged
4253 * @event_context: used to track events uniquely
4255 * Context - processed in interrupt context.
4258 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
4261 Mpi2EventAckRequest_t *ack_request;
4262 int i = smid - ioc->internal_smid;
4263 unsigned long flags;
4265 /* Without releasing the smid just update the
4266 * callback index and reuse the same smid for
4267 * processing this delayed request
4269 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4270 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
4271 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4274 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
4275 le16_to_cpu(event), smid, ioc->base_cb_idx));
4276 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
4277 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
4278 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
4279 ack_request->Event = event;
4280 ack_request->EventContext = event_context;
4281 ack_request->VF_ID = 0; /* TODO */
4282 ack_request->VP_ID = 0;
4283 ioc->put_smid_default(ioc, smid);
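/*
 * Note that the delayed EVENT_ACK recycles the smid of the request that
 * just completed: only the callback index in ioc->internal_lookup is
 * switched to base_cb_idx, so no new internal smid needs to be
 * allocated from interrupt context.
 */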
4287 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
4288 * sas_io_unit_ctrl messages
4289 * @ioc: per adapter object
4290 * @smid: system request message index
4291 * @handle: device handle
4293 * Context - processed in interrupt context.
4296 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
4297 u16 smid, u16 handle)
4299 Mpi2SasIoUnitControlRequest_t *mpi_request;
4301 int i = smid - ioc->internal_smid;
4302 unsigned long flags;
4304 if (ioc->remove_host) {
4306 ioc_info(ioc, "%s: host has been removed\n",
4309 } else if (ioc->pci_error_recovery) {
4311 ioc_info(ioc, "%s: host in pci error recovery\n",
4315 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4316 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4318 ioc_info(ioc, "%s: host is not operational\n",
4323 /* Without releasing the smid just update the
4324 * callback index and reuse the same smid for
4325 * processing this delayed request
4327 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4328 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4329 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4332 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4333 handle, smid, ioc->tm_sas_control_cb_idx));
4334 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4335 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4336 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4337 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4338 mpi_request->DevHandle = cpu_to_le16(handle);
4339 ioc->put_smid_default(ioc, smid);
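/*
 * Here the recycled smid is rerouted to tm_sas_control_cb_idx, so the
 * REMOVE_DEVICE completion is handled by the sas iounit control
 * completion routine above, which clears device_remove_in_progress for
 * this handle.
 */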
4343 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4344 * @ioc: per adapter object
4345 * @smid: system request message index
4347 * Context: Executed in interrupt context
4349 * This will check the delayed internal messages list and process the next pending request.
4352 * Return: 1 meaning mf should be freed from _base_interrupt
4353 * 0 means the mf is freed from this function.
4356 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4358 struct _sc_list *delayed_sc;
4359 struct _event_ack_list *delayed_event_ack;
4361 if (!list_empty(&ioc->delayed_event_ack_list)) {
4362 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4363 struct _event_ack_list, list);
4364 _scsih_issue_delayed_event_ack(ioc, smid,
4365 delayed_event_ack->Event, delayed_event_ack->EventContext);
4366 list_del(&delayed_event_ack->list);
4367 kfree(delayed_event_ack);
4371 if (!list_empty(&ioc->delayed_sc_list)) {
4372 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4373 struct _sc_list, list);
4374 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4375 delayed_sc->handle);
4376 list_del(&delayed_sc->list);
4384 * _scsih_check_for_pending_tm - check for pending task management
4385 * @ioc: per adapter object
4386 * @smid: system request message index
4388 * This will check the delayed target reset lists; if an entry is pending, the completing smid is freed and the next target reset is sent.
4391 * Return: 1 meaning mf should be freed from _base_interrupt
4392 * 0 means the mf is freed from this function.
4395 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4397 struct _tr_list *delayed_tr;
4399 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4400 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4401 struct _tr_list, list);
4402 mpt3sas_base_free_smid(ioc, smid);
4403 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4404 list_del(&delayed_tr->list);
4409 if (!list_empty(&ioc->delayed_tr_list)) {
4410 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4411 struct _tr_list, list);
4412 mpt3sas_base_free_smid(ioc, smid);
4413 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4414 list_del(&delayed_tr->list);
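/*
 * Delayed volume target resets are drained before delayed per-device
 * target resets, and at most one queued reset is sent per completion:
 * the just-completed smid is freed first so the send routines can
 * obtain a high-priority smid again.
 */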
4423 * _scsih_check_topo_delete_events - sanity check on topo events
4424 * @ioc: per adapter object
4425 * @event_data: the event data payload
4427 * This routine was added to better handle the cable breaker case.
4429 * This handles the case where the driver receives multiple expander
4430 * add and delete events in a single shot. When there is a delete event
4431 * the routine will void any pending add events waiting in the event queue.
4434 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4435 Mpi2EventDataSasTopologyChangeList_t *event_data)
4437 struct fw_event_work *fw_event;
4438 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4439 u16 expander_handle;
4440 struct _sas_node *sas_expander;
4441 unsigned long flags;
4445 for (i = 0 ; i < event_data->NumEntries; i++) {
4446 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4449 reason_code = event_data->PHY[i].PhyStatus &
4450 MPI2_EVENT_SAS_TOPO_RC_MASK;
4451 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4452 _scsih_tm_tr_send(ioc, handle);
4455 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4456 if (expander_handle < ioc->sas_hba.num_phys) {
4457 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4460 if (event_data->ExpStatus ==
4461 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4462 /* put expander attached devices into blocking state */
4463 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4464 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4466 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4467 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4469 handle = find_first_bit(ioc->blocking_handles,
4470 ioc->facts.MaxDevHandle);
4471 if (handle < ioc->facts.MaxDevHandle)
4472 _scsih_block_io_device(ioc, handle);
4473 } while (test_and_clear_bit(handle, ioc->blocking_handles));
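/*
 * For an expander reporting DELAY_NOT_RESPONDING, its attached devices
 * are flagged in the ioc->blocking_handles bitmap and the loop above
 * walks that bitmap, moving each flagged device into the blocked state.
 */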
4474 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4475 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4477 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4480 /* mark ignore flag for pending events */
4481 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4482 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4483 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4486 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4487 fw_event->event_data;
4488 if (local_event_data->ExpStatus ==
4489 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4490 local_event_data->ExpStatus ==
4491 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4492 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4495 ioc_info(ioc, "setting ignoring flag\n"));
4496 fw_event->ignore = 1;
4500 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
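/*
 * Pending SAS topology events that would re-add or re-enable an
 * expander which is now being deleted are marked fw_event->ignore, so
 * those stale add events are voided rather than processed after the
 * removal.
 */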
4504 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4506 * @ioc: per adapter object
4507 * @event_data: the event data payload
4509 * This handles the case where the driver receives multiple switch
4510 * or device add and delete events in a single shot. When there
4511 * is a delete event the routine will void any pending add
4512 * events waiting in the event queue.
4515 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4516 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4518 struct fw_event_work *fw_event;
4519 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4520 unsigned long flags;
4522 u16 handle, switch_handle;
4524 for (i = 0; i < event_data->NumEntries; i++) {
4526 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4529 reason_code = event_data->PortEntry[i].PortStatus;
4530 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4531 _scsih_tm_tr_send(ioc, handle);
4534 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4535 if (!switch_handle) {
4536 _scsih_block_io_to_pcie_children_attached_directly(
4540 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4541 if ((event_data->SwitchStatus
4542 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4543 (event_data->SwitchStatus ==
4544 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4545 _scsih_block_io_to_pcie_children_attached_directly(
4548 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4551 /* mark ignore flag for pending events */
4552 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4553 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4554 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4558 (Mpi26EventDataPCIeTopologyChangeList_t *)
4559 fw_event->event_data;
4560 if (local_event_data->SwitchStatus ==
4561 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4562 local_event_data->SwitchStatus ==
4563 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4564 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4567 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4568 fw_event->ignore = 1;
4572 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4576 * _scsih_set_volume_delete_flag - setting volume delete flag
4577 * @ioc: per adapter object
4578 * @handle: device handle
4580 * This returns nothing.
4583 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4585 struct _raid_device *raid_device;
4586 struct MPT3SAS_TARGET *sas_target_priv_data;
4587 unsigned long flags;
4589 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4590 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4591 if (raid_device && raid_device->starget &&
4592 raid_device->starget->hostdata) {
4593 sas_target_priv_data =
4594 raid_device->starget->hostdata;
4595 sas_target_priv_data->deleted = 1;
4597 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4598 handle, (u64)raid_device->wwid));
4600 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4604 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4605 * @handle: input handle
4606 * @a: handle for volume a
4607 * @b: handle for volume b
4609 * IR firmware only supports two raid volumes. The purpose of this
4610 * routine is to store the volume handle in either a or b, but only when
4611 * the given input handle is non-zero and does not already match a or b.
4614 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4616 if (!handle || handle == *a || handle == *b)
4625 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4626 * @ioc: per adapter object
4627 * @event_data: the event data payload
4628 * Context: interrupt time.
4630 * This routine will send a target reset to the volume, followed by target
4631 * resets to the PDs. This is called when a PD has been removed, or a
4632 * volume has been deleted or removed. When the target reset is sent
4633 * to the volume, the PD target resets need to be queued to start upon
4634 * completion of the volume target reset.
4637 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4638 Mpi2EventDataIrConfigChangeList_t *event_data)
4640 Mpi2EventIrConfigElement_t *element;
4642 u16 handle, volume_handle, a, b;
4643 struct _tr_list *delayed_tr;
4648 if (ioc->is_warpdrive)
4651 /* Volume Resets for Deleted or Removed */
4652 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4653 for (i = 0; i < event_data->NumElements; i++, element++) {
4654 if (le32_to_cpu(event_data->Flags) &
4655 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4657 if (element->ReasonCode ==
4658 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4659 element->ReasonCode ==
4660 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4661 volume_handle = le16_to_cpu(element->VolDevHandle);
4662 _scsih_set_volume_delete_flag(ioc, volume_handle);
4663 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4667 /* Volume Resets for UNHIDE events */
4668 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4669 for (i = 0; i < event_data->NumElements; i++, element++) {
4670 if (le32_to_cpu(event_data->Flags) &
4671 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4673 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4674 volume_handle = le16_to_cpu(element->VolDevHandle);
4675 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4680 _scsih_tm_tr_volume_send(ioc, a);
4682 _scsih_tm_tr_volume_send(ioc, b);
4684 /* PD target resets */
4685 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4686 for (i = 0; i < event_data->NumElements; i++, element++) {
4687 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4689 handle = le16_to_cpu(element->PhysDiskDevHandle);
4690 volume_handle = le16_to_cpu(element->VolDevHandle);
4691 clear_bit(handle, ioc->pd_handles);
4693 _scsih_tm_tr_send(ioc, handle);
4694 else if (volume_handle == a || volume_handle == b) {
4695 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4696 BUG_ON(!delayed_tr);
4697 INIT_LIST_HEAD(&delayed_tr->list);
4698 delayed_tr->handle = handle;
4699 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4701 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4704 _scsih_tm_tr_send(ioc, handle);
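/*
 * Ordering matters here: the volume target reset(s) were sent first,
 * and an unhidden PD whose parent volume received one of those resets
 * is parked on ioc->delayed_tr_list so that its own target reset is
 * only issued from _scsih_check_for_pending_tm() after the volume
 * reset completes.
 */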
4710 * _scsih_check_volume_delete_events - set delete flag for volumes
4711 * @ioc: per adapter object
4712 * @event_data: the event data payload
4713 * Context: interrupt time.
4715 * This will handle the case when the cable connected to the entire volume
4716 * is pulled. We will take care of setting the deleted flag so normal I/O is quietly failed with DID_NO_CONNECT.
4720 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4721 Mpi2EventDataIrVolume_t *event_data)
4725 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4727 state = le32_to_cpu(event_data->NewValue);
4728 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4729 MPI2_RAID_VOL_STATE_FAILED)
4730 _scsih_set_volume_delete_flag(ioc,
4731 le16_to_cpu(event_data->VolDevHandle));
4735 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4736 * @ioc: per adapter object
4737 * @event_data: the temp threshold event data
4738 * Context: interrupt time.
4741 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4742 Mpi2EventDataTemperature_t *event_data)
4745 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4746 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4747 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4748 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4749 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4750 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4751 event_data->SensorNum);
4752 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4753 event_data->CurrentTemperature);
4754 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4755 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4756 if ((doorbell & MPI2_IOC_STATE_MASK) ==
4757 MPI2_IOC_STATE_FAULT) {
4758 mpt3sas_print_fault_code(ioc,
4759 doorbell & MPI2_DOORBELL_DATA_MASK);
4760 } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4761 MPI2_IOC_STATE_COREDUMP) {
4762 mpt3sas_print_coredump_info(ioc,
4763 doorbell & MPI2_DOORBELL_DATA_MASK);
4769 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4771 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4773 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4777 return test_and_set_bit(0, &priv->ata_command_pending);
4779 clear_bit(0, &priv->ata_command_pending);
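/*
 * Only one ATA_12/ATA_16 pass-through may be outstanding per device:
 * scsih_qcmd() spins on this bit via _scsih_set_satl_pending(scmd, true)
 * and returns SCSI_MLQUEUE_DEVICE_BUSY while it is set, and the
 * completion and flush paths clear it with
 * _scsih_set_satl_pending(scmd, false).
 */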
4784 * _scsih_flush_running_cmds - completing outstanding commands.
4785 * @ioc: per adapter object
4787 * Flush out all pending scmds following a host reset; each outstanding
4788 * command is completed with DID_RESET (or DID_NO_CONNECT on host removal or PCI error recovery).
4791 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4793 struct scsi_cmnd *scmd;
4794 struct scsiio_tracker *st;
4798 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4799 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4803 _scsih_set_satl_pending(scmd, false);
4804 st = scsi_cmd_priv(scmd);
4805 mpt3sas_base_clear_st(ioc, st);
4806 scsi_dma_unmap(scmd);
4807 if (ioc->pci_error_recovery || ioc->remove_host)
4808 scmd->result = DID_NO_CONNECT << 16;
4810 scmd->result = DID_RESET << 16;
4811 scmd->scsi_done(scmd);
4813 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4817 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4818 * @ioc: per adapter object
4819 * @scmd: pointer to scsi command object
4820 * @mpi_request: pointer to the SCSI_IO request message frame
4822 * Supports DIF protection types 1, 2 and 3.
4825 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4826 Mpi25SCSIIORequest_t *mpi_request)
4829 unsigned char prot_op = scsi_get_prot_op(scmd);
4830 unsigned char prot_type = scsi_get_prot_type(scmd);
4831 Mpi25SCSIIORequest_t *mpi_request_3v =
4832 (Mpi25SCSIIORequest_t *)mpi_request;
4834 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4837 if (prot_op == SCSI_PROT_READ_STRIP)
4838 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4839 else if (prot_op == SCSI_PROT_WRITE_INSERT)
4840 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4844 switch (prot_type) {
4845 case SCSI_PROT_DIF_TYPE1:
4846 case SCSI_PROT_DIF_TYPE2:
4849 * enable ref/guard checking
4850 * auto increment ref tag
4852 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4853 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4854 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4855 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4856 cpu_to_be32(t10_pi_ref_tag(scmd->request));
4859 case SCSI_PROT_DIF_TYPE3:
4862 * enable guard checking
4864 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4869 mpi_request_3v->EEDPBlockSize =
4870 cpu_to_le16(scmd->device->sector_size);
4872 if (ioc->is_gen35_ioc)
4873 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4874 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
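/*
 * Summary of the EEDP setup above: DIF types 1 and 2 enable reference
 * tag and guard checking with an incrementing reference tag seeded from
 * t10_pi_ref_tag(); type 3 checks only the guard field. The protection
 * block size follows the device's logical sector size, and Gen3.5 IOCs
 * additionally set the app-tag disable mode flag.
 */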
4878 * _scsih_eedp_error_handling - return sense code for EEDP errors
4879 * @scmd: pointer to scsi command object
4880 * @ioc_status: ioc status
4883 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4887 switch (ioc_status) {
4888 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4891 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4894 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4901 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4903 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4904 SAM_STAT_CHECK_CONDITION;
4908 * scsih_qcmd - main scsi request entry point
4909 * @shost: SCSI host pointer
4910 * @scmd: pointer to scsi command object
4912 * The callback index is set inside `ioc->scsi_io_cb_idx`.
4914 * Return: 0 on success. If there's a failure, return either:
4915 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4916 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4919 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4921 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4922 struct MPT3SAS_DEVICE *sas_device_priv_data;
4923 struct MPT3SAS_TARGET *sas_target_priv_data;
4924 struct _raid_device *raid_device;
4925 struct request *rq = scmd->request;
4927 Mpi25SCSIIORequest_t *mpi_request;
4928 struct _pcie_device *pcie_device = NULL;
4933 if (ioc->logging_level & MPT_DEBUG_SCSI)
4934 scsi_print_command(scmd);
4936 sas_device_priv_data = scmd->device->hostdata;
4937 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4938 scmd->result = DID_NO_CONNECT << 16;
4939 scmd->scsi_done(scmd);
4943 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4944 scmd->result = DID_NO_CONNECT << 16;
4945 scmd->scsi_done(scmd);
4949 sas_target_priv_data = sas_device_priv_data->sas_target;
4951 /* invalid device handle */
4952 handle = sas_target_priv_data->handle;
4953 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4954 scmd->result = DID_NO_CONNECT << 16;
4955 scmd->scsi_done(scmd);
4960 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4961 /* host recovery or link resets sent via IOCTLs */
4962 return SCSI_MLQUEUE_HOST_BUSY;
4963 } else if (sas_target_priv_data->deleted) {
4964 /* device has been deleted */
4965 scmd->result = DID_NO_CONNECT << 16;
4966 scmd->scsi_done(scmd);
4968 } else if (sas_target_priv_data->tm_busy ||
4969 sas_device_priv_data->block) {
4970 /* device busy with task management */
4971 return SCSI_MLQUEUE_DEVICE_BUSY;
4975 * Bug workaround for firmware SATL handling. The loop
4976 * is based on atomic operations and ensures consistency
4977 * since we're lockless at this point
4980 if (test_bit(0, &sas_device_priv_data->ata_command_pending))
4981 return SCSI_MLQUEUE_DEVICE_BUSY;
4982 } while (_scsih_set_satl_pending(scmd, true));
4984 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4985 mpi_control = MPI2_SCSIIO_CONTROL_READ;
4986 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4987 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4989 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4992 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4993 /* NCQ Prio supported, make sure control indicated high priority */
4994 if (sas_device_priv_data->ncq_prio_enable) {
4995 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4996 if (class == IOPRIO_CLASS_RT)
4997 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4999 /* Make sure Device is not raid volume.
5000 * We do not expose raid functionality to upper layer for warpdrive.
5002 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5003 && !scsih_is_nvme(&scmd->device->sdev_gendev))
5004 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5005 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5007 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5009 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5010 _scsih_set_satl_pending(scmd, false);
5013 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5014 memset(mpi_request, 0, ioc->request_sz);
5015 _scsih_setup_eedp(ioc, scmd, mpi_request);
5017 if (scmd->cmd_len == 32)
5018 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5019 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5020 if (sas_device_priv_data->sas_target->flags &
5021 MPT_TARGET_FLAGS_RAID_COMPONENT)
5022 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5024 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5025 mpi_request->DevHandle = cpu_to_le16(handle);
5026 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5027 mpi_request->Control = cpu_to_le32(mpi_control);
5028 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5029 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5030 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5031 mpi_request->SenseBufferLowAddress =
5032 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5033 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5034 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5036 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5038 if (mpi_request->DataLength) {
5039 pcie_device = sas_target_priv_data->pcie_dev;
5040 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5041 mpt3sas_base_free_smid(ioc, smid);
5042 _scsih_set_satl_pending(scmd, false);
5046 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5048 raid_device = sas_target_priv_data->raid_device;
5049 if (raid_device && raid_device->direct_io_enabled)
5050 mpt3sas_setup_direct_io(ioc, scmd,
5051 raid_device, mpi_request);
5053 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5054 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5055 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5056 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5057 ioc->put_smid_fast_path(ioc, smid, handle);
5059 ioc->put_smid_scsi_io(ioc, smid,
5060 le16_to_cpu(mpi_request->DevHandle));
5062 ioc->put_smid_default(ioc, smid);
5066 return SCSI_MLQUEUE_HOST_BUSY;
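/*
 * Three submission paths are used above: the fast-path descriptor for
 * targets flagged MPT_TARGET_FASTPATH_IO, the regular SCSI IO
 * descriptor for other SCSI_IO requests, and the default descriptor for
 * RAID pass-through frames.
 */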
5070 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5071 * @sense_buffer: sense data returned by target
5072 * @data: normalized skey/asc/ascq
5075 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5077 if ((sense_buffer[0] & 0x7F) >= 0x72) {
5078 /* descriptor format */
5079 data->skey = sense_buffer[1] & 0x0F;
5080 data->asc = sense_buffer[2];
5081 data->ascq = sense_buffer[3];
5084 data->skey = sense_buffer[2] & 0x0F;
5085 data->asc = sense_buffer[12];
5086 data->ascq = sense_buffer[13];
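/*
 * Both sense formats are handled: a response code of 0x72 or higher
 * (descriptor format) carries key/asc/ascq in bytes 1-3, while the
 * fixed format keeps them in bytes 2, 12 and 13.
 */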
5091 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
5092 * @ioc: per adapter object
5093 * @scmd: pointer to scsi command object
5094 * @mpi_reply: reply mf payload returned from firmware
5097 * scsi_status - SCSI Status code returned from target device
5098 * scsi_state - state info associated with SCSI_IO determined by ioc
5099 * ioc_status - ioc supplied status info
5102 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5103 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5107 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5108 MPI2_IOCSTATUS_MASK;
5109 u8 scsi_state = mpi_reply->SCSIState;
5110 u8 scsi_status = mpi_reply->SCSIStatus;
5111 char *desc_ioc_state = NULL;
5112 char *desc_scsi_status = NULL;
5113 char *desc_scsi_state = ioc->tmp_string;
5114 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5115 struct _sas_device *sas_device = NULL;
5116 struct _pcie_device *pcie_device = NULL;
5117 struct scsi_target *starget = scmd->device->sdev_target;
5118 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5119 char *device_str = NULL;
5123 if (ioc->hide_ir_msg)
5124 device_str = "WarpDrive";
5126 device_str = "volume";
5128 if (log_info == 0x31170000)
5131 switch (ioc_status) {
5132 case MPI2_IOCSTATUS_SUCCESS:
5133 desc_ioc_state = "success";
5135 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5136 desc_ioc_state = "invalid function";
5138 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5139 desc_ioc_state = "scsi recovered error";
5141 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5142 desc_ioc_state = "scsi invalid dev handle";
5144 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5145 desc_ioc_state = "scsi device not there";
5147 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5148 desc_ioc_state = "scsi data overrun";
5150 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5151 desc_ioc_state = "scsi data underrun";
5153 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5154 desc_ioc_state = "scsi io data error";
5156 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5157 desc_ioc_state = "scsi protocol error";
5159 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5160 desc_ioc_state = "scsi task terminated";
5162 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5163 desc_ioc_state = "scsi residual mismatch";
5165 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5166 desc_ioc_state = "scsi task mgmt failed";
5168 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5169 desc_ioc_state = "scsi ioc terminated";
5171 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5172 desc_ioc_state = "scsi ext terminated";
5174 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5175 desc_ioc_state = "eedp guard error";
5177 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5178 desc_ioc_state = "eedp ref tag error";
5180 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5181 desc_ioc_state = "eedp app tag error";
5183 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5184 desc_ioc_state = "insufficient power";
5187 desc_ioc_state = "unknown";
5191 switch (scsi_status) {
5192 case MPI2_SCSI_STATUS_GOOD:
5193 desc_scsi_status = "good";
5195 case MPI2_SCSI_STATUS_CHECK_CONDITION:
5196 desc_scsi_status = "check condition";
5198 case MPI2_SCSI_STATUS_CONDITION_MET:
5199 desc_scsi_status = "condition met";
5201 case MPI2_SCSI_STATUS_BUSY:
5202 desc_scsi_status = "busy";
5204 case MPI2_SCSI_STATUS_INTERMEDIATE:
5205 desc_scsi_status = "intermediate";
5207 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5208 desc_scsi_status = "intermediate condmet";
5210 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5211 desc_scsi_status = "reservation conflict";
5213 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5214 desc_scsi_status = "command terminated";
5216 case MPI2_SCSI_STATUS_TASK_SET_FULL:
5217 desc_scsi_status = "task set full";
5219 case MPI2_SCSI_STATUS_ACA_ACTIVE:
5220 desc_scsi_status = "aca active";
5222 case MPI2_SCSI_STATUS_TASK_ABORTED:
5223 desc_scsi_status = "task aborted";
5226 desc_scsi_status = "unknown";
5230 desc_scsi_state[0] = '\0';
5231 if (!scsi_state)
5232 desc_scsi_state = " ";
5233 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5234 strcat(desc_scsi_state, "response info ");
5235 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5236 strcat(desc_scsi_state, "state terminated ");
5237 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5238 strcat(desc_scsi_state, "no status ");
5239 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5240 strcat(desc_scsi_state, "autosense failed ");
5241 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5242 strcat(desc_scsi_state, "autosense valid ");
5244 scsi_print_command(scmd);
5246 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5247 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5248 device_str, (u64)priv_target->sas_address);
5249 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5250 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5252 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5253 (u64)pcie_device->wwid, pcie_device->port_num);
5254 if (pcie_device->enclosure_handle != 0)
5255 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5256 (u64)pcie_device->enclosure_logical_id,
5258 if (pcie_device->connector_name[0])
5259 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5260 pcie_device->enclosure_level,
5261 pcie_device->connector_name);
5262 pcie_device_put(pcie_device);
5265 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5267 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5268 (u64)sas_device->sas_address, sas_device->phy);
5270 _scsih_display_enclosure_chassis_info(ioc, sas_device,
5273 sas_device_put(sas_device);
5277 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5278 le16_to_cpu(mpi_reply->DevHandle),
5279 desc_ioc_state, ioc_status, smid);
5280 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5281 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5282 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5283 le16_to_cpu(mpi_reply->TaskTag),
5284 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5285 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5286 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5288 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5289 struct sense_info data;
5290 _scsih_normalize_sense(scmd->sense_buffer, &data);
5291 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5292 data.skey, data.asc, data.ascq,
5293 le32_to_cpu(mpi_reply->SenseCount));
5295 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5296 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5297 response_bytes = (u8 *)&response_info;
5298 _scsih_response_code(ioc, response_bytes[0]);
5303 * _scsih_turn_on_pfa_led - illuminate PFA LED
5304 * @ioc: per adapter object
5305 * @handle: device handle
5309 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5311 Mpi2SepReply_t mpi_reply;
5312 Mpi2SepRequest_t mpi_request;
5313 struct _sas_device *sas_device;
5315 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5319 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5320 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5321 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5322 mpi_request.SlotStatus =
5323 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5324 mpi_request.DevHandle = cpu_to_le16(handle);
5325 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5326 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5327 &mpi_request)) != 0) {
5328 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5329 __FILE__, __LINE__, __func__);
5332 sas_device->pfa_led_on = 1;
5334 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5336 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5337 le16_to_cpu(mpi_reply.IOCStatus),
5338 le32_to_cpu(mpi_reply.IOCLogInfo)));
5342 sas_device_put(sas_device);
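/*
 * The "on" path above addresses the SEP request by device handle,
 * whereas _scsih_turn_off_pfa_led() below addresses it by enclosure and
 * slot, presumably because the device handle may no longer be valid by
 * the time the LED is cleared.
 */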
5346 * _scsih_turn_off_pfa_led - turn off Fault LED
5347 * @ioc: per adapter object
5348 * @sas_device: sas device whose PFA LED has to turned off
5352 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5353 struct _sas_device *sas_device)
5355 Mpi2SepReply_t mpi_reply;
5356 Mpi2SepRequest_t mpi_request;
5358 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5359 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5360 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5361 mpi_request.SlotStatus = 0;
5362 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5363 mpi_request.DevHandle = 0;
5364 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5365 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5366 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5367 &mpi_request)) != 0) {
5368 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5369 __FILE__, __LINE__, __func__);
5373 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5375 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5376 le16_to_cpu(mpi_reply.IOCStatus),
5377 le32_to_cpu(mpi_reply.IOCLogInfo)));
5383 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5384 * @ioc: per adapter object
5385 * @handle: device handle
5386 * Context: interrupt.
5389 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5391 struct fw_event_work *fw_event;
5393 fw_event = alloc_fw_event_work(0);
5396 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5397 fw_event->device_handle = handle;
5398 fw_event->ioc = ioc;
5399 _scsih_fw_event_add(ioc, fw_event);
5400 fw_event_work_put(fw_event);
5404 * _scsih_smart_predicted_fault - process smart errors
5405 * @ioc: per adapter object
5406 * @handle: device handle
5407 * Context: interrupt.
5410 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5412 struct scsi_target *starget;
5413 struct MPT3SAS_TARGET *sas_target_priv_data;
5414 Mpi2EventNotificationReply_t *event_reply;
5415 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5416 struct _sas_device *sas_device;
5418 unsigned long flags;
5420 /* only handle non-raid devices */
5421 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5422 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5426 starget = sas_device->starget;
5427 sas_target_priv_data = starget->hostdata;
5429 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5430 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5433 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5435 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5437 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5438 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5440 /* insert into event log */
5441 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5442 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5443 event_reply = kzalloc(sz, GFP_ATOMIC);
5445 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5446 __FILE__, __LINE__, __func__);
5450 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5451 event_reply->Event =
5452 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5453 event_reply->MsgLength = sz/4;
5454 event_reply->EventDataLength =
5455 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5456 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5457 event_reply->EventData;
5458 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5459 event_data->ASC = 0x5D;
5460 event_data->DevHandle = cpu_to_le16(handle);
5461 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5462 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5466 sas_device_put(sas_device);
5470 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
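/*
 * On a SMART trip the driver synthesizes a local
 * SAS_DEVICE_STATUS_CHANGE event (reason RC_SMART_DATA, ASC 0x5D) into
 * the ctl event log, and on IBM-branded adapters it also queues a
 * MPT3SAS_TURN_ON_PFA_LED work item to light the predicted-fault LED.
 */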
5475 * _scsih_io_done - scsi request callback
5476 * @ioc: per adapter object
5477 * @smid: system request message index
5478 * @msix_index: MSIX table index supplied by the OS
5479 * @reply: reply message frame(lower 32bit addr)
5481 * Callback handler when using _scsih_qcmd.
5483 * Return: 1 meaning mf should be freed from _base_interrupt
5484 * 0 means the mf is freed from this function.
5487 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5489 Mpi25SCSIIORequest_t *mpi_request;
5490 Mpi2SCSIIOReply_t *mpi_reply;
5491 struct scsi_cmnd *scmd;
5492 struct scsiio_tracker *st;
5498 struct MPT3SAS_DEVICE *sas_device_priv_data;
5499 u32 response_code = 0;
5501 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5503 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5507 _scsih_set_satl_pending(scmd, false);
5509 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5511 if (mpi_reply == NULL) {
5512 scmd->result = DID_OK << 16;
5516 sas_device_priv_data = scmd->device->hostdata;
5517 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5518 sas_device_priv_data->sas_target->deleted) {
5519 scmd->result = DID_NO_CONNECT << 16;
5522 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5525 * WARPDRIVE: If direct_io is set then this was a direct I/O;
5526 * a failed direct I/O should be redirected to the volume.
5528 st = scsi_cmd_priv(scmd);
5529 if (st->direct_io &&
5530 ((ioc_status & MPI2_IOCSTATUS_MASK)
5531 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5534 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5535 mpi_request->DevHandle =
5536 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5537 ioc->put_smid_scsi_io(ioc, smid,
5538 sas_device_priv_data->sas_target->handle);
5541 /* turning off TLR */
5542 scsi_state = mpi_reply->SCSIState;
5543 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5545 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5546 if (!sas_device_priv_data->tlr_snoop_check) {
5547 sas_device_priv_data->tlr_snoop_check++;
5548 if ((!ioc->is_warpdrive &&
5549 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5550 !scsih_is_nvme(&scmd->device->sdev_gendev))
5551 && sas_is_tlr_enabled(scmd->device) &&
5552 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5553 sas_disable_tlr(scmd->device);
5554 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5558 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5559 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5560 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5561 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5564 ioc_status &= MPI2_IOCSTATUS_MASK;
5565 scsi_status = mpi_reply->SCSIStatus;
5567 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5568 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5569 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5570 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5571 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5574 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5575 struct sense_info data;
5576 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5578 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5579 le32_to_cpu(mpi_reply->SenseCount));
5580 memcpy(scmd->sense_buffer, sense_data, sz);
5581 _scsih_normalize_sense(scmd->sense_buffer, &data);
5582 /* failure prediction threshold exceeded */
5583 if (data.asc == 0x5D)
5584 _scsih_smart_predicted_fault(ioc,
5585 le16_to_cpu(mpi_reply->DevHandle));
5586 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5588 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5589 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5590 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5591 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5592 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5594 switch (ioc_status) {
5595 case MPI2_IOCSTATUS_BUSY:
5596 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5597 scmd->result = SAM_STAT_BUSY;
5600 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5601 scmd->result = DID_NO_CONNECT << 16;
5604 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5605 if (sas_device_priv_data->block) {
5606 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5609 if (log_info == 0x31110630) {
5610 if (scmd->retries > 2) {
5611 scmd->result = DID_NO_CONNECT << 16;
5612 scsi_device_set_state(scmd->device,
5615 scmd->result = DID_SOFT_ERROR << 16;
5616 scmd->device->expecting_cc_ua = 1;
5619 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5620 scmd->result = DID_RESET << 16;
5622 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5623 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5624 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5625 scmd->result = DID_RESET << 16;
5628 scmd->result = DID_SOFT_ERROR << 16;
5630 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5631 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5632 scmd->result = DID_RESET << 16;
5635 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5636 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5637 scmd->result = DID_SOFT_ERROR << 16;
5639 scmd->result = (DID_OK << 16) | scsi_status;
5642 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5643 scmd->result = (DID_OK << 16) | scsi_status;
5645 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5648 if (xfer_cnt < scmd->underflow) {
5649 if (scsi_status == SAM_STAT_BUSY)
5650 scmd->result = SAM_STAT_BUSY;
5652 scmd->result = DID_SOFT_ERROR << 16;
5653 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5654 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5655 scmd->result = DID_SOFT_ERROR << 16;
5656 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5657 scmd->result = DID_RESET << 16;
5658 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5659 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5660 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5661 scmd->result = (DRIVER_SENSE << 24) |
5662 SAM_STAT_CHECK_CONDITION;
5663 scmd->sense_buffer[0] = 0x70;
5664 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5665 scmd->sense_buffer[12] = 0x20;
5666 scmd->sense_buffer[13] = 0;
5670 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5671 scsi_set_resid(scmd, 0);
5673 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5674 case MPI2_IOCSTATUS_SUCCESS:
5675 scmd->result = (DID_OK << 16) | scsi_status;
5676 if (response_code ==
5677 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5678 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5679 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5680 scmd->result = DID_SOFT_ERROR << 16;
5681 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5682 scmd->result = DID_RESET << 16;
5685 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5686 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5687 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5688 _scsih_eedp_error_handling(scmd, ioc_status);
5691 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5692 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5693 case MPI2_IOCSTATUS_INVALID_SGL:
5694 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5695 case MPI2_IOCSTATUS_INVALID_FIELD:
5696 case MPI2_IOCSTATUS_INVALID_STATE:
5697 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5698 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5699 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5701 scmd->result = DID_SOFT_ERROR << 16;
5706 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5707 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5711 scsi_dma_unmap(scmd);
5712 mpt3sas_base_free_smid(ioc, smid);
5713 scmd->scsi_done(scmd);
5718 * _scsih_sas_host_refresh - refreshing sas host object contents
5719 * @ioc: per adapter object
5722 * During port enable, the firmware will send topology events for every
5723 * device. It's possible that the handles may change from the previous
5724 * setting, so this code keeps the handles updated when they change.
5727 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5732 Mpi2ConfigReply_t mpi_reply;
5733 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5734 u16 attached_handle;
5738 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5739 (u64)ioc->sas_hba.sas_address));
5741 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5742 * sizeof(Mpi2SasIOUnit0PhyData_t));
5743 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5744 if (!sas_iounit_pg0) {
5745 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5746 __FILE__, __LINE__, __func__);
5750 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5751 sas_iounit_pg0, sz)) != 0)
5753 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5754 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5756 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5757 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5759 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5760 PhyData[0].ControllerDevHandle);
5761 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5762 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5764 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5765 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5766 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5767 attached_handle, i, link_rate);
5770 kfree(sas_iounit_pg0);
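/*
 * During the refresh, a phy that reports an attached handle but a
 * negotiated rate below 1.5 Gb/s is clamped to 1.5 Gb/s before calling
 * mpt3sas_transport_update_links(), presumably so the transport layer
 * keeps the phy/link object alive instead of tearing it down.
 */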
5774 * _scsih_sas_host_add - create sas host object
5775 * @ioc: per adapter object
5777 * Creating host side data object, stored in ioc->sas_hba
5780 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5783 Mpi2ConfigReply_t mpi_reply;
5784 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5785 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5786 Mpi2SasPhyPage0_t phy_pg0;
5787 Mpi2SasDevicePage0_t sas_device_pg0;
5788 Mpi2SasEnclosurePage0_t enclosure_pg0;
5791 u8 device_missing_delay;
5794 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5796 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5797 __FILE__, __LINE__, __func__);
5800 ioc->sas_hba.phy = kcalloc(num_phys,
5801 sizeof(struct _sas_phy), GFP_KERNEL);
5802 if (!ioc->sas_hba.phy) {
5803 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5804 __FILE__, __LINE__, __func__);
5807 ioc->sas_hba.num_phys = num_phys;
5809 /* sas_iounit page 0 */
5810 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5811 sizeof(Mpi2SasIOUnit0PhyData_t));
5812 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5813 if (!sas_iounit_pg0) {
5814 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5815 __FILE__, __LINE__, __func__);
5818 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5819 sas_iounit_pg0, sz))) {
5820 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5821 __FILE__, __LINE__, __func__);
5824 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5825 MPI2_IOCSTATUS_MASK;
5826 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5827 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5828 __FILE__, __LINE__, __func__);
5832 /* sas_iounit page 1 */
5833 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5834 sizeof(Mpi2SasIOUnit1PhyData_t));
5835 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5836 if (!sas_iounit_pg1) {
5837 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5838 __FILE__, __LINE__, __func__);
5841 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5842 sas_iounit_pg1, sz))) {
5843 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5844 __FILE__, __LINE__, __func__);
5847 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5848 MPI2_IOCSTATUS_MASK;
5849 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5850 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5851 __FILE__, __LINE__, __func__);
5855 ioc->io_missing_delay =
5856 sas_iounit_pg1->IODeviceMissingDelay;
5857 device_missing_delay =
5858 sas_iounit_pg1->ReportDeviceMissingDelay;
5859 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5860 ioc->device_missing_delay = (device_missing_delay &
5861 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5863 ioc->device_missing_delay = device_missing_delay &
5864 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
5866 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5867 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5868 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5870 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5871 __FILE__, __LINE__, __func__);
5874 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5875 MPI2_IOCSTATUS_MASK;
5876 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5877 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5878 __FILE__, __LINE__, __func__);
5883 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5884 PhyData[0].ControllerDevHandle);
5885 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5886 ioc->sas_hba.phy[i].phy_id = i;
5887 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5888 phy_pg0, ioc->sas_hba.parent_dev);
5890 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5891 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5892 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5893 __FILE__, __LINE__, __func__);
5896 ioc->sas_hba.enclosure_handle =
5897 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5898 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5899 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5900 ioc->sas_hba.handle,
5901 (u64)ioc->sas_hba.sas_address,
5902 ioc->sas_hba.num_phys);
5904 if (ioc->sas_hba.enclosure_handle) {
5905 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5906 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5907 ioc->sas_hba.enclosure_handle)))
5908 ioc->sas_hba.enclosure_logical_id =
5909 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5913 kfree(sas_iounit_pg1);
5914 kfree(sas_iounit_pg0);
5918 * _scsih_expander_add - creating expander object
5919 * @ioc: per adapter object
5920 * @handle: expander handle
5922 * Creating expander object, stored in ioc->sas_expander_list.
5924 * Return: 0 for success, else error.
5927 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5929 struct _sas_node *sas_expander;
5930 struct _enclosure_node *enclosure_dev;
5931 Mpi2ConfigReply_t mpi_reply;
5932 Mpi2ExpanderPage0_t expander_pg0;
5933 Mpi2ExpanderPage1_t expander_pg1;
5936 u64 sas_address, sas_address_parent = 0;
5938 unsigned long flags;
5939 struct _sas_port *mpt3sas_port = NULL;
5946 if (ioc->shost_recovery || ioc->pci_error_recovery)
5949 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5950 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5951 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5952 __FILE__, __LINE__, __func__);
5956 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5957 MPI2_IOCSTATUS_MASK;
5958 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5959 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5960 __FILE__, __LINE__, __func__);
5964 /* handle out of order topology events */
5965 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5966 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5968 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5969 __FILE__, __LINE__, __func__);
5972 if (sas_address_parent != ioc->sas_hba.sas_address) {
5973 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5974 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5975 sas_address_parent);
5976 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5977 if (!sas_expander) {
5978 rc = _scsih_expander_add(ioc, parent_handle);
5984 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5985 sas_address = le64_to_cpu(expander_pg0.SASAddress);
5986 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5988 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5993 sas_expander = kzalloc(sizeof(struct _sas_node),
5995 if (!sas_expander) {
5996 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5997 __FILE__, __LINE__, __func__);
6001 sas_expander->handle = handle;
6002 sas_expander->num_phys = expander_pg0.NumPhys;
6003 sas_expander->sas_address_parent = sas_address_parent;
6004 sas_expander->sas_address = sas_address;
6006 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6007 handle, parent_handle,
6008 (u64)sas_expander->sas_address, sas_expander->num_phys);
6010 if (!sas_expander->num_phys)
6012 sas_expander->phy = kcalloc(sas_expander->num_phys,
6013 sizeof(struct _sas_phy), GFP_KERNEL);
6014 if (!sas_expander->phy) {
6015 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6016 __FILE__, __LINE__, __func__);
6021 INIT_LIST_HEAD(&sas_expander->sas_port_list);
6022 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6023 sas_address_parent);
6024 if (!mpt3sas_port) {
6025 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6026 __FILE__, __LINE__, __func__);
6030 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6032 for (i = 0 ; i < sas_expander->num_phys ; i++) {
6033 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6034 &expander_pg1, i, handle))) {
6035 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6036 __FILE__, __LINE__, __func__);
6040 sas_expander->phy[i].handle = handle;
6041 sas_expander->phy[i].phy_id = i;
6043 if ((mpt3sas_transport_add_expander_phy(ioc,
6044 &sas_expander->phy[i], expander_pg1,
6045 sas_expander->parent_dev))) {
6046 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6047 __FILE__, __LINE__, __func__);
6053 if (sas_expander->enclosure_handle) {
6055 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6056 sas_expander->enclosure_handle);
6058 sas_expander->enclosure_logical_id =
6059 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6062 _scsih_expander_node_add(ioc, sas_expander);
6068 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
6069 sas_address_parent);
6070 kfree(sas_expander);
6075 * mpt3sas_expander_remove - removing expander object
6076 * @ioc: per adapter object
6077 * @sas_address: expander sas_address
6080 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
6082 struct _sas_node *sas_expander;
6083 unsigned long flags;
6085 if (ioc->shost_recovery)
6088 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6089 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6091 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6093 _scsih_expander_node_remove(ioc, sas_expander);
6097 * _scsih_done - internal SCSI_IO callback handler.
6098 * @ioc: per adapter object
6099 * @smid: system request message index
6100 * @msix_index: MSIX table index supplied by the OS
6101 * @reply: reply message frame(lower 32bit addr)
6103 * Callback handler when sending internally generated SCSI_IO.
6104 * The callback index passed is `ioc->scsih_cb_idx`
6106 * Return: 1 meaning mf should be freed from _base_interrupt
6107 * 0 means the mf is freed from this function.
6110 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
6112 MPI2DefaultReply_t *mpi_reply;
6114 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6115 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
6117 if (ioc->scsih_cmds.smid != smid)
6119 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
6121 memcpy(ioc->scsih_cmds.reply, mpi_reply,
6122 mpi_reply->MsgLength*4);
6123 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
6125 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
6126 complete(&ioc->scsih_cmds.done);
6133 #define MPT3_MAX_LUNS (255)
6137 * _scsih_check_access_status - check access flags
6138 * @ioc: per adapter object
6139 * @sas_address: sas address
6140 * @handle: sas device handle
6141 * @access_status: errors returned during discovery of the device
6143 * Return: 0 for success, else failure
6146 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
6147 u16 handle, u8 access_status)
6152 switch (access_status) {
6153 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
6154 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
6157 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
6158 desc = "sata capability failed";
6160 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
6161 desc = "sata affiliation conflict";
6163 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
6164 desc = "route not addressable";
6166 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
6167 desc = "smp error not addressable";
6169 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
6170 desc = "device blocked";
6172 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
6173 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
6174 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
6175 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
6176 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
6177 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
6178 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
6179 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
6180 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
6181 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
6182 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
6183 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
6184 desc = "sata initialization failed";
6194 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
6195 desc, (u64)sas_address, handle);
6200 * _scsih_check_device - checking device responsiveness
6201 * @ioc: per adapter object
6202 * @parent_sas_address: sas address of parent expander or sas host
6203 * @handle: attached device handle
6204 * @phy_number: phy number
6205 * @link_rate: new link rate
6208 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
6209 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
6211 Mpi2ConfigReply_t mpi_reply;
6212 Mpi2SasDevicePage0_t sas_device_pg0;
6213 struct _sas_device *sas_device;
6214 struct _enclosure_node *enclosure_dev = NULL;
6216 unsigned long flags;
6218 struct scsi_target *starget;
6219 struct MPT3SAS_TARGET *sas_target_priv_data;
6222 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6223 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
6226 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6227 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6230 /* wide port handling: we only need to handle the device once, for the
6231 * phy that is matched in sas device page zero
6233 if (phy_number != sas_device_pg0.PhyNum)
6236 /* check if this is end device */
6237 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6238 if (!(_scsih_is_end_device(device_info)))
6241 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6242 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6243 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6249 if (unlikely(sas_device->handle != handle)) {
6250 starget = sas_device->starget;
6251 sas_target_priv_data = starget->hostdata;
6252 starget_printk(KERN_INFO, starget,
6253 "handle changed from(0x%04x) to (0x%04x)!!!\n",
6254 sas_device->handle, handle);
6255 sas_target_priv_data->handle = handle;
6256 sas_device->handle = handle;
6257 if (le16_to_cpu(sas_device_pg0.Flags) &
6258 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6259 sas_device->enclosure_level =
6260 sas_device_pg0.EnclosureLevel;
6261 memcpy(sas_device->connector_name,
6262 sas_device_pg0.ConnectorName, 4);
6263 sas_device->connector_name[4] = '\0';
6265 sas_device->enclosure_level = 0;
6266 sas_device->connector_name[0] = '\0';
6269 sas_device->enclosure_handle =
6270 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6271 sas_device->is_chassis_slot_valid = 0;
6272 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
6273 sas_device->enclosure_handle);
6274 if (enclosure_dev) {
6275 sas_device->enclosure_logical_id =
6276 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6277 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6278 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6279 sas_device->is_chassis_slot_valid = 1;
6280 sas_device->chassis_slot =
6281 enclosure_dev->pg0.ChassisSlot;
6286 /* check if device is present */
6287 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6288 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6289 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
6294 /* check if there were any issues with discovery */
6295 if (_scsih_check_access_status(ioc, sas_address, handle,
6296 sas_device_pg0.AccessStatus))
6299 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6300 _scsih_ublock_io_device(ioc, sas_address);
6303 sas_device_put(sas_device);
6307 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6309 sas_device_put(sas_device);
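/* Lookup/refcount pattern used above and throughout this file (minimal
 * sketch, mirroring the calls in _scsih_check_device()):
 *
 *	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 *	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
 *	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 *	if (sas_device) {
 *		... inspect or update sas_device ...
 *		sas_device_put(sas_device);
 *	}
 *
 * The __get helpers return the object with its reference count raised,
 * so every successful lookup must be balanced with sas_device_put().
 */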
6313 * _scsih_add_device - creating sas device object
6314 * @ioc: per adapter object
6315 * @handle: sas device handle
6316 * @phy_num: phy number end device attached to
6317 * @is_pd: is this hidden raid component
6319 * Creating end device object, stored in ioc->sas_device_list.
6321 * Return: 0 for success, non-zero for failure.
6324 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6327 Mpi2ConfigReply_t mpi_reply;
6328 Mpi2SasDevicePage0_t sas_device_pg0;
6329 struct _sas_device *sas_device;
6330 struct _enclosure_node *enclosure_dev = NULL;
6335 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6336 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6337 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6338 __FILE__, __LINE__, __func__);
6342 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6343 MPI2_IOCSTATUS_MASK;
6344 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6345 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6346 __FILE__, __LINE__, __func__);
6350 /* check if this is end device */
6351 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6352 if (!(_scsih_is_end_device(device_info)))
6354 set_bit(handle, ioc->pend_os_device_add);
6355 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6357 /* check if device is present */
6358 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6359 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6360 ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6365 /* check if there were any issues with discovery */
6366 if (_scsih_check_access_status(ioc, sas_address, handle,
6367 sas_device_pg0.AccessStatus))
6370 sas_device = mpt3sas_get_sdev_by_addr(ioc,
6373 clear_bit(handle, ioc->pend_os_device_add);
6374 sas_device_put(sas_device);
6378 if (sas_device_pg0.EnclosureHandle) {
6380 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6381 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6382 if (enclosure_dev == NULL)
6383 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
6384 sas_device_pg0.EnclosureHandle);
6387 sas_device = kzalloc(sizeof(struct _sas_device),
6390 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6391 __FILE__, __LINE__, __func__);
6395 kref_init(&sas_device->refcount);
6396 sas_device->handle = handle;
6397 if (_scsih_get_sas_address(ioc,
6398 le16_to_cpu(sas_device_pg0.ParentDevHandle),
6399 &sas_device->sas_address_parent) != 0)
6400 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6401 __FILE__, __LINE__, __func__);
6402 sas_device->enclosure_handle =
6403 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6404 if (sas_device->enclosure_handle != 0)
6406 sas_device->slot = le16_to_cpu(sas_device_pg0.Slot);
6407 sas_device->device_info = device_info;
6408 sas_device->sas_address = sas_address;
6409 sas_device->phy = sas_device_pg0.PhyNum;
6410 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6411 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6413 if (le16_to_cpu(sas_device_pg0.Flags)
6414 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6415 sas_device->enclosure_level =
6416 sas_device_pg0.EnclosureLevel;
6417 memcpy(sas_device->connector_name,
6418 sas_device_pg0.ConnectorName, 4);
6419 sas_device->connector_name[4] = '\0';
6421 sas_device->enclosure_level = 0;
6422 sas_device->connector_name[0] = '\0';
6424 /* get enclosure_logical_id & chassis_slot*/
6425 sas_device->is_chassis_slot_valid = 0;
6426 if (enclosure_dev) {
6427 sas_device->enclosure_logical_id =
6428 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6429 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6430 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6431 sas_device->is_chassis_slot_valid = 1;
6432 sas_device->chassis_slot =
6433 enclosure_dev->pg0.ChassisSlot;
6437 /* get device name */
6438 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6440 if (ioc->wait_for_discovery_to_complete)
6441 _scsih_sas_device_init_add(ioc, sas_device);
6443 _scsih_sas_device_add(ioc, sas_device);
6445 sas_device_put(sas_device);
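/* Note: while initial discovery is still in progress
 * (ioc->wait_for_discovery_to_complete) the new end device is parked on
 * the init list via _scsih_sas_device_init_add() and reported to the
 * SCSI midlayer once discovery finishes; at runtime
 * _scsih_sas_device_add() registers it immediately.
 */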
6450 * _scsih_remove_device - removing sas device object
6451 * @ioc: per adapter object
6452 * @sas_device: the sas_device object
6455 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6456 struct _sas_device *sas_device)
6458 struct MPT3SAS_TARGET *sas_target_priv_data;
6460 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6461 (sas_device->pfa_led_on)) {
6462 _scsih_turn_off_pfa_led(ioc, sas_device);
6463 sas_device->pfa_led_on = 0;
6467 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6469 sas_device->handle, (u64)sas_device->sas_address));
6471 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6474 if (sas_device->starget && sas_device->starget->hostdata) {
6475 sas_target_priv_data = sas_device->starget->hostdata;
6476 sas_target_priv_data->deleted = 1;
6477 _scsih_ublock_io_device(ioc, sas_device->sas_address);
6478 sas_target_priv_data->handle =
6479 MPT3SAS_INVALID_DEVICE_HANDLE;
6482 if (!ioc->hide_drives)
6483 mpt3sas_transport_port_remove(ioc,
6484 sas_device->sas_address,
6485 sas_device->sas_address_parent);
6487 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6488 sas_device->handle, (u64)sas_device->sas_address);
6490 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6493 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6495 sas_device->handle, (u64)sas_device->sas_address));
6496 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6501 * _scsih_sas_topology_change_event_debug - debug for topology event
6502 * @ioc: per adapter object
6503 * @event_data: event data payload
6507 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6508 Mpi2EventDataSasTopologyChangeList_t *event_data)
6514 char *status_str = NULL;
6515 u8 link_rate, prev_link_rate;
6517 switch (event_data->ExpStatus) {
6518 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6521 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6522 status_str = "remove";
6524 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6526 status_str = "responding";
6528 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6529 status_str = "remove delay";
6532 status_str = "unknown status";
6535 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6536 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6537 "start_phy(%02d), count(%d)\n",
6538 le16_to_cpu(event_data->ExpanderDevHandle),
6539 le16_to_cpu(event_data->EnclosureHandle),
6540 event_data->StartPhyNum, event_data->NumEntries);
6541 for (i = 0; i < event_data->NumEntries; i++) {
6542 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6545 phy_number = event_data->StartPhyNum + i;
6546 reason_code = event_data->PHY[i].PhyStatus &
6547 MPI2_EVENT_SAS_TOPO_RC_MASK;
6548 switch (reason_code) {
6549 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6550 status_str = "target add";
6552 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6553 status_str = "target remove";
6555 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6556 status_str = "delay target remove";
6558 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6559 status_str = "link rate change";
6561 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6562 status_str = "target responding";
6565 status_str = "unknown";
6568 link_rate = event_data->PHY[i].LinkRate >> 4;
6569 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6570 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6571 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6572 handle, status_str, link_rate, prev_link_rate);
6578 * _scsih_sas_topology_change_event - handle topology changes
6579 * @ioc: per adapter object
6580 * @fw_event: The fw_event_work object
6585 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6586 struct fw_event_work *fw_event)
6589 u16 parent_handle, handle;
6591 u8 phy_number, max_phys;
6592 struct _sas_node *sas_expander;
6594 unsigned long flags;
6595 u8 link_rate, prev_link_rate;
6596 Mpi2EventDataSasTopologyChangeList_t *event_data =
6597 (Mpi2EventDataSasTopologyChangeList_t *)
6598 fw_event->event_data;
6600 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6601 _scsih_sas_topology_change_event_debug(ioc, event_data);
6603 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6606 if (!ioc->sas_hba.num_phys)
6607 _scsih_sas_host_add(ioc);
6609 _scsih_sas_host_refresh(ioc);
6611 if (fw_event->ignore) {
6612 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6616 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6618 /* handle expander add */
6619 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6620 if (_scsih_expander_add(ioc, parent_handle) != 0)
6623 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6624 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6627 sas_address = sas_expander->sas_address;
6628 max_phys = sas_expander->num_phys;
6629 } else if (parent_handle < ioc->sas_hba.num_phys) {
6630 sas_address = ioc->sas_hba.sas_address;
6631 max_phys = ioc->sas_hba.num_phys;
6633 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6636 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6638 /* handle siblings events */
6639 for (i = 0; i < event_data->NumEntries; i++) {
6640 if (fw_event->ignore) {
6642 ioc_info(ioc, "ignoring expander event\n"));
6645 if (ioc->remove_host || ioc->pci_error_recovery)
6647 phy_number = event_data->StartPhyNum + i;
6648 if (phy_number >= max_phys)
6650 reason_code = event_data->PHY[i].PhyStatus &
6651 MPI2_EVENT_SAS_TOPO_RC_MASK;
6652 if ((event_data->PHY[i].PhyStatus &
6653 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6654 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6656 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6659 link_rate = event_data->PHY[i].LinkRate >> 4;
6660 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6661 switch (reason_code) {
6662 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6664 if (ioc->shost_recovery)
6667 if (link_rate == prev_link_rate)
6670 mpt3sas_transport_update_links(ioc, sas_address,
6671 handle, phy_number, link_rate);
6673 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6676 _scsih_check_device(ioc, sas_address, handle,
6677 phy_number, link_rate);
6679 if (!test_bit(handle, ioc->pend_os_device_add))
6684 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6686 if (ioc->shost_recovery)
6689 mpt3sas_transport_update_links(ioc, sas_address,
6690 handle, phy_number, link_rate);
6692 _scsih_add_device(ioc, handle, phy_number, 0);
6695 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6697 _scsih_device_remove_by_handle(ioc, handle);
6702 /* handle expander removal */
6703 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6705 mpt3sas_expander_remove(ioc, sas_address);
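/* Per-phy reason codes handled above, in outline:
 * - RC_PHY_CHANGED: refresh the transport link rate and, if the link is
 *   up, re-validate the device via _scsih_check_device(); if an OS-level
 *   add is still pending for the handle (pend_os_device_add), the entry
 *   is then treated as a target add.
 * - RC_TARG_ADDED: update the links and create the device object.
 * - RC_TARG_NOT_RESPONDING: remove the device by handle.
 * An expander whose ExpStatus is ES_NOT_RESPONDING is torn down last via
 * mpt3sas_expander_remove().
 */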
6711 * _scsih_sas_device_status_change_event_debug - debug for device event
6713 * @event_data: event data payload
6717 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6718 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6720 char *reason_str = NULL;
6722 switch (event_data->ReasonCode) {
6723 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6724 reason_str = "smart data";
6726 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6727 reason_str = "unsupported device discovered";
6729 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6730 reason_str = "internal device reset";
6732 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6733 reason_str = "internal task abort";
6735 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6736 reason_str = "internal task abort set";
6738 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6739 reason_str = "internal clear task set";
6741 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6742 reason_str = "internal query task";
6744 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6745 reason_str = "sata init failure";
6747 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6748 reason_str = "internal device reset complete";
6750 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6751 reason_str = "internal task abort complete";
6753 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6754 reason_str = "internal async notification";
6756 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6757 reason_str = "expander reduced functionality";
6759 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6760 reason_str = "expander reduced functionality complete";
6763 reason_str = "unknown reason";
6766 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6767 reason_str, le16_to_cpu(event_data->DevHandle),
6768 (u64)le64_to_cpu(event_data->SASAddress),
6769 le16_to_cpu(event_data->TaskTag));
6770 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6771 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6772 event_data->ASC, event_data->ASCQ);
6777 * _scsih_sas_device_status_change_event - handle device status change
6778 * @ioc: per adapter object
6779 * @event_data: The fw event
6783 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6784 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6786 struct MPT3SAS_TARGET *target_priv_data;
6787 struct _sas_device *sas_device;
6789 unsigned long flags;
6791 /* In MPI Revision K (0xC), the internal device reset complete was
6792 * implemented, so avoid setting tm_busy flag for older firmware.
6794 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6797 if (event_data->ReasonCode !=
6798 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6799 event_data->ReasonCode !=
6800 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6803 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6804 sas_address = le64_to_cpu(event_data->SASAddress);
6805 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6808 if (!sas_device || !sas_device->starget)
6811 target_priv_data = sas_device->starget->hostdata;
6812 if (!target_priv_data)
6815 if (event_data->ReasonCode ==
6816 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6817 target_priv_data->tm_busy = 1;
6819 target_priv_data->tm_busy = 0;
6821 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6823 "%s tm_busy flag for handle(0x%04x)\n",
6824 (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
6825 target_priv_data->handle);
6829 sas_device_put(sas_device);
6831 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
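/* tm_busy set here is consulted on the I/O submission side: while an
 * internal device reset is outstanding, new commands for this target
 * are bounced back to the midlayer instead of being sent to a device
 * the firmware is resetting (sketch of intent; the actual check lives
 * in the queuecommand path).
 */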
6836 * _scsih_check_pcie_access_status - check access flags
6837 * @ioc: per adapter object
6839 * @handle: sas device handle
6840 * @access_status: errors returned during discovery of the device
6842 * Return: 0 for success, else failure
6845 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6846 u16 handle, u8 access_status)
6851 switch (access_status) {
6852 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6853 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6856 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6857 desc = "PCIe device capability failed";
6859 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6860 desc = "PCIe device blocked";
6862 "Device with Access Status (%s): wwid(0x%016llx), "
6863 "handle(0x%04x)\n ll only be added to the internal list",
6864 desc, (u64)wwid, handle);
6867 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6868 desc = "PCIe device mem space access failed";
6870 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6871 desc = "PCIe device unsupported";
6873 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6874 desc = "PCIe device MSIx Required";
6876 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6877 desc = "PCIe device init fail max";
6879 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6880 desc = "PCIe device status unknown";
6882 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6883 desc = "nvme ready timeout";
6885 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6886 desc = "nvme device configuration unsupported";
6888 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6889 desc = "nvme identify failed";
6891 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6892 desc = "nvme qconfig failed";
6894 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6895 desc = "nvme qcreation failed";
6897 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6898 desc = "nvme eventcfg failed";
6900 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6901 desc = "nvme get feature stat failed";
6903 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6904 desc = "nvme idle timeout";
6906 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6907 desc = "nvme failure status";
6910 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6911 access_status, (u64)wwid, handle);
6918 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6919 desc, (u64)wwid, handle);
6924 * _scsih_pcie_device_remove_from_sml - removing pcie device
6925 * from SML and freeing up associated memory
6926 * @ioc: per adapter object
6927 * @pcie_device: the pcie_device object
6930 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6931 struct _pcie_device *pcie_device)
6933 struct MPT3SAS_TARGET *sas_target_priv_data;
6936 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6938 pcie_device->handle, (u64)pcie_device->wwid));
6939 if (pcie_device->enclosure_handle != 0)
6941 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6943 (u64)pcie_device->enclosure_logical_id,
6944 pcie_device->slot));
6945 if (pcie_device->connector_name[0] != '\0')
6947 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6949 pcie_device->enclosure_level,
6950 pcie_device->connector_name));
6952 if (pcie_device->starget && pcie_device->starget->hostdata) {
6953 sas_target_priv_data = pcie_device->starget->hostdata;
6954 sas_target_priv_data->deleted = 1;
6955 _scsih_ublock_io_device(ioc, pcie_device->wwid);
6956 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6959 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6960 pcie_device->handle, (u64)pcie_device->wwid);
6961 if (pcie_device->enclosure_handle != 0)
6962 ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
6963 (u64)pcie_device->enclosure_logical_id,
6965 if (pcie_device->connector_name[0] != '\0')
6966 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
6967 pcie_device->enclosure_level,
6968 pcie_device->connector_name);
6970 if (pcie_device->starget && (pcie_device->access_status !=
6971 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
6972 scsi_remove_target(&pcie_device->starget->dev);
6974 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6976 pcie_device->handle, (u64)pcie_device->wwid));
6977 if (pcie_device->enclosure_handle != 0)
6979 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6981 (u64)pcie_device->enclosure_logical_id,
6982 pcie_device->slot));
6983 if (pcie_device->connector_name[0] != '\0')
6985 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6987 pcie_device->enclosure_level,
6988 pcie_device->connector_name));
6990 kfree(pcie_device->serial_number);
6995 * _scsih_pcie_check_device - checking device responsiveness
6996 * @ioc: per adapter object
6997 * @handle: attached device handle
7000 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7002 Mpi2ConfigReply_t mpi_reply;
7003 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7005 struct _pcie_device *pcie_device;
7007 unsigned long flags;
7008 struct scsi_target *starget;
7009 struct MPT3SAS_TARGET *sas_target_priv_data;
7012 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7013 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
7016 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7017 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7020 /* check if this is end device */
7021 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7022 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
7025 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7026 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7027 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7030 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7034 if (unlikely(pcie_device->handle != handle)) {
7035 starget = pcie_device->starget;
7036 sas_target_priv_data = starget->hostdata;
7037 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7038 starget_printk(KERN_INFO, starget,
7039 "handle changed from(0x%04x) to (0x%04x)!!!\n",
7040 pcie_device->handle, handle);
7041 sas_target_priv_data->handle = handle;
7042 pcie_device->handle = handle;
7044 if (le32_to_cpu(pcie_device_pg0.Flags) &
7045 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7046 pcie_device->enclosure_level =
7047 pcie_device_pg0.EnclosureLevel;
7048 memcpy(&pcie_device->connector_name[0],
7049 &pcie_device_pg0.ConnectorName[0], 4);
7051 pcie_device->enclosure_level = 0;
7052 pcie_device->connector_name[0] = '\0';
7056 /* check if device is present */
7057 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7058 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7059 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
7061 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7062 pcie_device_put(pcie_device);
7066 /* check if there were any issues with discovery */
7067 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7068 pcie_device_pg0.AccessStatus)) {
7069 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7070 pcie_device_put(pcie_device);
7074 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7075 pcie_device_put(pcie_device);
7077 _scsih_ublock_io_device(ioc, wwid);
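/* The NVMe/PCIe device list follows the same reference-counting
 * discipline as the SAS list: __mpt3sas_get_pdev_by_wwid() and
 * __mpt3sas_get_pdev_by_handle() are called under ioc->pcie_device_lock
 * and every successful lookup is balanced with pcie_device_put().
 */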
7083 * _scsih_pcie_add_device - creating pcie device object
7084 * @ioc: per adapter object
7085 * @handle: pcie device handle
7087 * Creating end device object, stored in ioc->pcie_device_list.
7089 * Return: 1 means queue the event later, 0 means complete the event
7092 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7094 Mpi26PCIeDevicePage0_t pcie_device_pg0;
7095 Mpi26PCIeDevicePage2_t pcie_device_pg2;
7096 Mpi2ConfigReply_t mpi_reply;
7097 struct _pcie_device *pcie_device;
7098 struct _enclosure_node *enclosure_dev;
7102 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7103 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
7104 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7105 __FILE__, __LINE__, __func__);
7108 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7109 MPI2_IOCSTATUS_MASK;
7110 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7111 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7112 __FILE__, __LINE__, __func__);
7116 set_bit(handle, ioc->pend_os_device_add);
7117 wwid = le64_to_cpu(pcie_device_pg0.WWID);
7119 /* check if device is present */
7120 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
7121 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
7122 ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
7127 /* check if there were any issues with discovery */
7128 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
7129 pcie_device_pg0.AccessStatus))
7132 if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
7133 (pcie_device_pg0.DeviceInfo))))
7136 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
7138 clear_bit(handle, ioc->pend_os_device_add);
7139 pcie_device_put(pcie_device);
7143 /* PCIe Device Page 2 contains read-only information about a
7144 * specific NVMe device; therefore, this page is only
7145 * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
7147 if (!(mpt3sas_scsih_is_pcie_scsi_device(
7148 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7149 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
7150 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
7153 "failure at %s:%d/%s()!\n", __FILE__,
7154 __LINE__, __func__);
7158 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7159 MPI2_IOCSTATUS_MASK;
7160 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7162 "failure at %s:%d/%s()!\n", __FILE__,
7163 __LINE__, __func__);
7168 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
7170 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7171 __FILE__, __LINE__, __func__);
7175 kref_init(&pcie_device->refcount);
7176 pcie_device->id = ioc->pcie_target_id++;
7177 pcie_device->channel = PCIE_CHANNEL;
7178 pcie_device->handle = handle;
7179 pcie_device->access_status = pcie_device_pg0.AccessStatus;
7180 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
7181 pcie_device->wwid = wwid;
7182 pcie_device->port_num = pcie_device_pg0.PortNum;
7183 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
7184 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7186 pcie_device->enclosure_handle =
7187 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
7188 if (pcie_device->enclosure_handle != 0)
7189 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
7191 if (le32_to_cpu(pcie_device_pg0.Flags) &
7192 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
7193 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
7194 memcpy(&pcie_device->connector_name[0],
7195 &pcie_device_pg0.ConnectorName[0], 4);
7197 pcie_device->enclosure_level = 0;
7198 pcie_device->connector_name[0] = '\0';
7201 /* get enclosure_logical_id */
7202 if (pcie_device->enclosure_handle) {
7204 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7205 pcie_device->enclosure_handle);
7207 pcie_device->enclosure_logical_id =
7208 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7210 /* TODO -- Add device name once FW supports it */
7211 if (!(mpt3sas_scsih_is_pcie_scsi_device(
7212 le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
7213 pcie_device->nvme_mdts =
7214 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
7215 pcie_device->shutdown_latency =
7216 le16_to_cpu(pcie_device_pg2.ShutdownLatency);
7218 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
7219 * if drive's RTD3 Entry Latency is greater than IOC's
7220 * max_shutdown_latency.
7222 if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
7223 ioc->max_shutdown_latency =
7224 pcie_device->shutdown_latency;
7225 if (pcie_device_pg2.ControllerResetTO)
7226 pcie_device->reset_timeout =
7227 pcie_device_pg2.ControllerResetTO;
7229 pcie_device->reset_timeout = 30;
7231 pcie_device->reset_timeout = 30;
7233 if (ioc->wait_for_discovery_to_complete)
7234 _scsih_pcie_device_init_add(ioc, pcie_device);
7236 _scsih_pcie_device_add(ioc, pcie_device);
7238 pcie_device_put(pcie_device);
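/* For true NVMe devices the PCIe Device Page 2 fields captured above
 * are used later in the driver: nvme_mdts bounds the size of NVMe
 * encapsulated I/O, shutdown_latency (the drive's RTD3 entry latency)
 * may raise ioc->max_shutdown_latency, and reset_timeout (defaulting to
 * 30 when ControllerResetTO is zero) bounds device reset handling.
 */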
7243 * _scsih_pcie_topology_change_event_debug - debug for topology
7245 * @ioc: per adapter object
7246 * @event_data: event data payload
7250 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7251 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
7257 char *status_str = NULL;
7258 u8 link_rate, prev_link_rate;
7260 switch (event_data->SwitchStatus) {
7261 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
7264 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
7265 status_str = "remove";
7267 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
7269 status_str = "responding";
7271 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
7272 status_str = "remove delay";
7275 status_str = "unknown status";
7278 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
7279 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x) "
7280 "start_port(%02d), count(%d)\n",
7281 le16_to_cpu(event_data->SwitchDevHandle),
7282 le16_to_cpu(event_data->EnclosureHandle),
7283 event_data->StartPortNum, event_data->NumEntries);
7284 for (i = 0; i < event_data->NumEntries; i++) {
7286 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7289 port_number = event_data->StartPortNum + i;
7290 reason_code = event_data->PortEntry[i].PortStatus;
7291 switch (reason_code) {
7292 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7293 status_str = "target add";
7295 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7296 status_str = "target remove";
7298 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
7299 status_str = "delay target remove";
7301 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7302 status_str = "link rate change";
7304 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
7305 status_str = "target responding";
7308 status_str = "unknown";
7311 link_rate = event_data->PortEntry[i].CurrentPortInfo &
7312 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7313 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
7314 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7315 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
7316 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
7317 handle, status_str, link_rate, prev_link_rate);
7322 * _scsih_pcie_topology_change_event - handle PCIe topology
7324 * @ioc: per adapter object
7325 * @fw_event: The fw_event_work object
7330 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7331 struct fw_event_work *fw_event)
7336 u8 link_rate, prev_link_rate;
7337 unsigned long flags;
7339 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
7340 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
7341 struct _pcie_device *pcie_device;
7343 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7344 _scsih_pcie_topology_change_event_debug(ioc, event_data);
7346 if (ioc->shost_recovery || ioc->remove_host ||
7347 ioc->pci_error_recovery)
7350 if (fw_event->ignore) {
7351 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
7355 /* handle siblings events */
7356 for (i = 0; i < event_data->NumEntries; i++) {
7357 if (fw_event->ignore) {
7359 ioc_info(ioc, "ignoring switch event\n"));
7362 if (ioc->remove_host || ioc->pci_error_recovery)
7364 reason_code = event_data->PortEntry[i].PortStatus;
7366 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7370 link_rate = event_data->PortEntry[i].CurrentPortInfo
7371 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7372 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7373 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7375 switch (reason_code) {
7376 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7377 if (ioc->shost_recovery)
7379 if (link_rate == prev_link_rate)
7381 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7384 _scsih_pcie_check_device(ioc, handle);
7386 /* The code after this point handles the case where a device has been
7387 * added but is returning BUSY for some time. Then, before the Device
7388 * Missing Delay expires and the device becomes READY, the device is
7389 * removed and added back.
7392 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7393 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7394 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7397 pcie_device_put(pcie_device);
7401 if (!test_bit(handle, ioc->pend_os_device_add))
7405 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7407 event_data->PortEntry[i].PortStatus &= 0xF0;
7408 event_data->PortEntry[i].PortStatus |=
7409 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
7411 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7412 if (ioc->shost_recovery)
7414 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7417 rc = _scsih_pcie_add_device(ioc, handle);
7419 /* mark entry vacant */
7420 /* TODO: This needs to be reviewed and fixed; we don't have an
7421 * entry status that marks an event entry void, like vacant.
7424 event_data->PortEntry[i].PortStatus |=
7425 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7428 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7429 _scsih_pcie_device_remove_by_handle(ioc, handle);
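/* Note on the PS_PORT_CHANGED branch above: if the driver holds no
 * pcie_device object for the handle but an OS-level add is still
 * pending (pend_os_device_add), the entry's PortStatus is rewritten to
 * MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED so that the same event entry is
 * handled as a device add.
 */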
7436 * _scsih_pcie_device_status_change_event_debug - debug for device event
7438 * @event_data: event data payload
7442 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7443 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7445 char *reason_str = NULL;
7447 switch (event_data->ReasonCode) {
7448 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7449 reason_str = "smart data";
7451 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7452 reason_str = "unsupported device discovered";
7454 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7455 reason_str = "internal device reset";
7457 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7458 reason_str = "internal task abort";
7460 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7461 reason_str = "internal task abort set";
7463 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7464 reason_str = "internal clear task set";
7466 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7467 reason_str = "internal query task";
7469 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7470 reason_str = "device init failure";
7472 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7473 reason_str = "internal device reset complete";
7475 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7476 reason_str = "internal task abort complete";
7478 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7479 reason_str = "internal async notification";
7481 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7482 reason_str = "pcie hot reset failed";
7485 reason_str = "unknown reason";
7489 ioc_info(ioc, "PCIE device status change: (%s)\n"
7490 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7491 reason_str, le16_to_cpu(event_data->DevHandle),
7492 (u64)le64_to_cpu(event_data->WWID),
7493 le16_to_cpu(event_data->TaskTag));
7494 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7495 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7496 event_data->ASC, event_data->ASCQ);
7501 * _scsih_pcie_device_status_change_event - handle device status
7503 * @ioc: per adapter object
7504 * @fw_event: The fw_event_work object
7508 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7509 struct fw_event_work *fw_event)
7511 struct MPT3SAS_TARGET *target_priv_data;
7512 struct _pcie_device *pcie_device;
7514 unsigned long flags;
7515 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7516 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7517 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7518 _scsih_pcie_device_status_change_event_debug(ioc,
7521 if (event_data->ReasonCode !=
7522 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7523 event_data->ReasonCode !=
7524 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7527 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7528 wwid = le64_to_cpu(event_data->WWID);
7529 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7531 if (!pcie_device || !pcie_device->starget)
7534 target_priv_data = pcie_device->starget->hostdata;
7535 if (!target_priv_data)
7538 if (event_data->ReasonCode ==
7539 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7540 target_priv_data->tm_busy = 1;
7542 target_priv_data->tm_busy = 0;
7545 pcie_device_put(pcie_device);
7547 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7551 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7553 * @ioc: per adapter object
7554 * @event_data: event data payload
7558 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7559 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7561 char *reason_str = NULL;
7563 switch (event_data->ReasonCode) {
7564 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7565 reason_str = "enclosure add";
7567 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7568 reason_str = "enclosure remove";
7571 reason_str = "unknown reason";
7575 ioc_info(ioc, "enclosure status change: (%s)\n"
7576 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7578 le16_to_cpu(event_data->EnclosureHandle),
7579 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7580 le16_to_cpu(event_data->StartSlot));
7584 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7585 * @ioc: per adapter object
7586 * @fw_event: The fw_event_work object
7590 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7591 struct fw_event_work *fw_event)
7593 Mpi2ConfigReply_t mpi_reply;
7594 struct _enclosure_node *enclosure_dev = NULL;
7595 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7596 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7598 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7600 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7601 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7602 (Mpi2EventDataSasEnclDevStatusChange_t *)
7603 fw_event->event_data);
7604 if (ioc->shost_recovery)
7607 if (enclosure_handle)
7609 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7611 switch (event_data->ReasonCode) {
7612 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7613 if (!enclosure_dev) {
7615 kzalloc(sizeof(struct _enclosure_node),
7617 if (!enclosure_dev) {
7618 ioc_info(ioc, "failure at %s:%d/%s()!\n",
7619 __FILE__, __LINE__, __func__);
7622 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7623 &enclosure_dev->pg0,
7624 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7627 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7628 MPI2_IOCSTATUS_MASK)) {
7629 kfree(enclosure_dev);
7633 list_add_tail(&enclosure_dev->list,
7634 &ioc->enclosure_list);
7637 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7638 if (enclosure_dev) {
7639 list_del(&enclosure_dev->list);
7640 kfree(enclosure_dev);
7649 * _scsih_sas_broadcast_primitive_event - handle broadcast events
7650 * @ioc: per adapter object
7651 * @fw_event: The fw_event_work object
7655 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7656 struct fw_event_work *fw_event)
7658 struct scsi_cmnd *scmd;
7659 struct scsi_device *sdev;
7660 struct scsiio_tracker *st;
7663 struct MPT3SAS_DEVICE *sas_device_priv_data;
7664 u32 termination_count;
7666 Mpi2SCSITaskManagementReply_t *mpi_reply;
7667 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7668 (Mpi2EventDataSasBroadcastPrimitive_t *)
7669 fw_event->event_data;
7671 unsigned long flags;
7674 u8 task_abort_retries;
7676 mutex_lock(&ioc->tm_cmds.mutex);
7677 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7678 __func__, event_data->PhyNum, event_data->PortWidth);
7680 _scsih_block_io_all_device(ioc);
7682 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7683 mpi_reply = ioc->tm_cmds.reply;
7684 broadcast_aen_retry:
7686 /* sanity checks for retrying this loop */
7687 if (max_retries++ == 5) {
7688 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7690 } else if (max_retries > 1)
7692 ioc_info(ioc, "%s: %d retry\n",
7693 __func__, max_retries - 1));
7695 termination_count = 0;
7697 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7698 if (ioc->shost_recovery)
7700 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7703 st = scsi_cmd_priv(scmd);
7704 sdev = scmd->device;
7705 sas_device_priv_data = sdev->hostdata;
7706 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7708 /* skip hidden raid components */
7709 if (sas_device_priv_data->sas_target->flags &
7710 MPT_TARGET_FLAGS_RAID_COMPONENT)
7713 if (sas_device_priv_data->sas_target->flags &
7714 MPT_TARGET_FLAGS_VOLUME)
7716 /* skip PCIe devices */
7717 if (sas_device_priv_data->sas_target->flags &
7718 MPT_TARGET_FLAGS_PCIE_DEVICE)
7721 handle = sas_device_priv_data->sas_target->handle;
7722 lun = sas_device_priv_data->lun;
7725 if (ioc->shost_recovery)
7728 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7729 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
7730 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7731 st->msix_io, 30, 0);
7733 sdev_printk(KERN_WARNING, sdev,
7734 "mpt3sas_scsih_issue_tm: FAILED when sending "
7735 "QUERY_TASK: scmd(%p)\n", scmd);
7736 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7737 goto broadcast_aen_retry;
7739 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7740 & MPI2_IOCSTATUS_MASK;
7741 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7742 sdev_printk(KERN_WARNING, sdev,
7743 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7745 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7746 goto broadcast_aen_retry;
7749 /* see if IO is still owned by IOC and target */
7750 if (mpi_reply->ResponseCode ==
7751 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7752 mpi_reply->ResponseCode ==
7753 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7754 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7757 task_abort_retries = 0;
7759 if (task_abort_retries++ == 60) {
7761 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7763 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7764 goto broadcast_aen_retry;
7767 if (ioc->shost_recovery)
7770 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
7771 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
7772 st->smid, st->msix_io, 30, 0);
7773 if (r == FAILED || st->cb_idx != 0xFF) {
7774 sdev_printk(KERN_WARNING, sdev,
7775 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7776 "scmd(%p)\n", scmd);
7780 if (task_abort_retries > 1)
7781 sdev_printk(KERN_WARNING, sdev,
7782 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7784 " scmd(%p)\n", task_abort_retries - 1, scmd);
7786 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7787 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7790 if (ioc->broadcast_aen_pending) {
7793 "%s: loop back due to pending AEN\n",
7795 ioc->broadcast_aen_pending = 0;
7796 goto broadcast_aen_retry;
7800 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7804 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7805 __func__, query_count, termination_count));
7807 ioc->broadcast_aen_busy = 0;
7808 if (!ioc->shost_recovery)
7809 _scsih_ublock_io_all_device(ioc);
7810 mutex_unlock(&ioc->tm_cmds.mutex);
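/* Broadcast primitive handling above, in outline: block I/O, then for
 * every outstanding SCSI I/O (skipping RAID volumes, hidden RAID
 * components and PCIe devices) send a QUERY_TASK; if the firmware still
 * owns or has queued the I/O, retry ABORT_TASK for that command (giving
 * up after 60 attempts).  The whole sweep is restarted on failures,
 * giving up after 5 passes, and run once more if another broadcast AEN
 * arrived meanwhile (ioc->broadcast_aen_pending).  Finally I/O is
 * unblocked and tm_cmds.mutex released.
 */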
7814 * _scsih_sas_discovery_event - handle discovery events
7815 * @ioc: per adapter object
7816 * @fw_event: The fw_event_work object
7820 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7821 struct fw_event_work *fw_event)
7823 Mpi2EventDataSasDiscovery_t *event_data =
7824 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7826 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7827 ioc_info(ioc, "discovery event: (%s)",
7828 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7830 if (event_data->DiscoveryStatus)
7831 pr_cont("discovery_status(0x%08x)",
7832 le32_to_cpu(event_data->DiscoveryStatus));
7836 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7837 !ioc->sas_hba.num_phys) {
7838 if (disable_discovery > 0 && ioc->shost_recovery) {
7839 /* Wait for the reset to complete */
7840 while (ioc->shost_recovery)
7843 _scsih_sas_host_add(ioc);
7848 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7850 * @ioc: per adapter object
7851 * @fw_event: The fw_event_work object
7855 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7856 struct fw_event_work *fw_event)
7858 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7859 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7861 switch (event_data->ReasonCode) {
7862 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7863 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7864 le16_to_cpu(event_data->DevHandle),
7865 (u64)le64_to_cpu(event_data->SASAddress),
7866 event_data->PhysicalPort);
7868 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7869 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7870 le16_to_cpu(event_data->DevHandle),
7871 (u64)le64_to_cpu(event_data->SASAddress),
7872 event_data->PhysicalPort);
7880 * _scsih_pcie_enumeration_event - handle enumeration events
7881 * @ioc: per adapter object
7882 * @fw_event: The fw_event_work object
7886 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7887 struct fw_event_work *fw_event)
7889 Mpi26EventDataPCIeEnumeration_t *event_data =
7890 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7892 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7895 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7896 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7897 "started" : "completed",
7899 if (event_data->EnumerationStatus)
7900 pr_cont("enumeration_status(0x%08x)",
7901 le32_to_cpu(event_data->EnumerationStatus));
7906 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7907 * @ioc: per adapter object
7908 * @handle: device handle for physical disk
7909 * @phys_disk_num: physical disk number
7911 * Return: 0 for success, else failure.
7914 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7916 Mpi2RaidActionRequest_t *mpi_request;
7917 Mpi2RaidActionReply_t *mpi_reply;
7924 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7927 mutex_lock(&ioc->scsih_cmds.mutex);
7929 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7930 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7934 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7936 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7938 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7939 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7944 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7945 ioc->scsih_cmds.smid = smid;
7946 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7948 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7949 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7950 mpi_request->PhysDiskNum = phys_disk_num;
7953 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7954 handle, phys_disk_num));
7956 init_completion(&ioc->scsih_cmds.done);
7957 ioc->put_smid_default(ioc, smid);
7958 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7960 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7961 mpt3sas_check_cmd_timeout(ioc,
7962 ioc->scsih_cmds.status, mpi_request,
7963 sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
7968 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7970 mpi_reply = ioc->scsih_cmds.reply;
7971 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7972 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7973 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
7976 ioc_status &= MPI2_IOCSTATUS_MASK;
7977 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7979 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7980 ioc_status, log_info));
7984 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7988 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7989 mutex_unlock(&ioc->scsih_cmds.mutex);
7992 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
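/* Submission side of the internal command protocol completed by
 * _scsih_done() (minimal sketch of the steps used above):
 *
 *	mutex_lock(&ioc->scsih_cmds.mutex);
 *	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
 *	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 *	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 *	... build the request ...
 *	init_completion(&ioc->scsih_cmds.done);
 *	ioc->put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 *	... on timeout without MPT3_CMD_COMPLETE, escalate to host reset ...
 *	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
 *	mutex_unlock(&ioc->scsih_cmds.mutex);
 */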
7997 * _scsih_reprobe_lun - reprobing lun
7998 * @sdev: scsi device struct
7999 * @no_uld_attach: sdev->no_uld_attach flag setting
8003 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8005 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8006 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8007 sdev->no_uld_attach ? "hiding" : "exposing");
8008 WARN_ON(scsi_device_reprobe(sdev));
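/* Toggling sdev->no_uld_attach controls whether upper-level drivers
 * (e.g. sd) bind to the device; scsi_device_reprobe() re-runs that
 * attachment, so a drive can be hidden when it becomes a RAID member
 * and exposed again when it leaves the volume.
 */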
8012 * _scsih_sas_volume_add - add new volume
8013 * @ioc: per adapter object
8014 * @element: IR config element data
8018 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
8019 Mpi2EventIrConfigElement_t *element)
8021 struct _raid_device *raid_device;
8022 unsigned long flags;
8024 u16 handle = le16_to_cpu(element->VolDevHandle);
8027 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8029 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8030 __FILE__, __LINE__, __func__);
8034 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8035 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
8036 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8041 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8043 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8044 __FILE__, __LINE__, __func__);
8048 raid_device->id = ioc->sas_id++;
8049 raid_device->channel = RAID_CHANNEL;
8050 raid_device->handle = handle;
8051 raid_device->wwid = wwid;
8052 _scsih_raid_device_add(ioc, raid_device);
8053 if (!ioc->wait_for_discovery_to_complete) {
8054 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8055 raid_device->id, 0);
8057 _scsih_raid_device_remove(ioc, raid_device);
8059 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8060 _scsih_determine_boot_device(ioc, raid_device, 1);
8061 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
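/* New volumes are surfaced on RAID_CHANNEL with an id drawn from
 * ioc->sas_id.  During initial discovery the scsi_add_device() call is
 * deferred and the volume is only recorded as a potential boot device
 * via _scsih_determine_boot_device().
 */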
8066 * _scsih_sas_volume_delete - delete volume
8067 * @ioc: per adapter object
8068 * @handle: volume device handle
8072 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8074 struct _raid_device *raid_device;
8075 unsigned long flags;
8076 struct MPT3SAS_TARGET *sas_target_priv_data;
8077 struct scsi_target *starget = NULL;
8079 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8080 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8082 if (raid_device->starget) {
8083 starget = raid_device->starget;
8084 sas_target_priv_data = starget->hostdata;
8085 sas_target_priv_data->deleted = 1;
8087 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
8088 raid_device->handle, (u64)raid_device->wwid);
8089 list_del(&raid_device->list);
8092 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8094 scsi_remove_target(&starget->dev);
8098 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
8099 * @ioc: per adapter object
8100 * @element: IR config element data
8104 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
8105 Mpi2EventIrConfigElement_t *element)
8107 struct _sas_device *sas_device;
8108 struct scsi_target *starget = NULL;
8109 struct MPT3SAS_TARGET *sas_target_priv_data;
8110 unsigned long flags;
8111 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8113 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8114 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8116 sas_device->volume_handle = 0;
8117 sas_device->volume_wwid = 0;
8118 clear_bit(handle, ioc->pd_handles);
8119 if (sas_device->starget && sas_device->starget->hostdata) {
8120 starget = sas_device->starget;
8121 sas_target_priv_data = starget->hostdata;
8122 sas_target_priv_data->flags &=
8123 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
8126 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8130 /* exposing raid component */
8132 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
8134 sas_device_put(sas_device);
8138 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
8139 * @ioc: per adapter object
8140 * @element: IR config element data
8144 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
8145 Mpi2EventIrConfigElement_t *element)
8147 struct _sas_device *sas_device;
8148 struct scsi_target *starget = NULL;
8149 struct MPT3SAS_TARGET *sas_target_priv_data;
8150 unsigned long flags;
8151 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8152 u16 volume_handle = 0;
8153 u64 volume_wwid = 0;
8155 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
8157 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
8160 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8161 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
8163 set_bit(handle, ioc->pd_handles);
8164 if (sas_device->starget && sas_device->starget->hostdata) {
8165 starget = sas_device->starget;
8166 sas_target_priv_data = starget->hostdata;
8167 sas_target_priv_data->flags |=
8168 MPT_TARGET_FLAGS_RAID_COMPONENT;
8169 sas_device->volume_handle = volume_handle;
8170 sas_device->volume_wwid = volume_wwid;
8173 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8177 /* hiding raid component */
8178 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8181 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
8183 sas_device_put(sas_device);
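/* Hiding and exposing a physical disk are symmetric: _scsih_sas_pd_hide()
 * sets MPT_TARGET_FLAGS_RAID_COMPONENT, records the owning volume's
 * handle/wwid, issues a RAID action so the firmware keeps the fast path
 * enabled for the now-hidden disk, and reprobes each LUN with
 * no_uld_attach set; _scsih_sas_pd_expose() clears the flag and reprobes
 * with no_uld_attach cleared.
 */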
8187 * _scsih_sas_pd_delete - delete pd component
8188 * @ioc: per adapter object
8189 * @element: IR config element data
8193 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
8194 Mpi2EventIrConfigElement_t *element)
8196 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8198 _scsih_device_remove_by_handle(ioc, handle);
8202 * _scsih_sas_pd_add - add pd component
8203 * @ioc: per adapter object
8204 * @element: IR config element data
8208 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
8209 Mpi2EventIrConfigElement_t *element)
8211 struct _sas_device *sas_device;
8212 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
8213 Mpi2ConfigReply_t mpi_reply;
8214 Mpi2SasDevicePage0_t sas_device_pg0;
8219 set_bit(handle, ioc->pd_handles);
8221 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8223 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8224 sas_device_put(sas_device);
8228 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
8229 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
8230 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8231 __FILE__, __LINE__, __func__);
8235 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8236 MPI2_IOCSTATUS_MASK;
8237 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8238 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8239 __FILE__, __LINE__, __func__);
8243 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8244 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8245 mpt3sas_transport_update_links(ioc, sas_address, handle,
8246 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8248 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
8249 _scsih_add_device(ioc, handle, 0, 1);
8253 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
8254 * @ioc: per adapter object
8255 * @event_data: event data payload
8259 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8260 Mpi2EventDataIrConfigChangeList_t *event_data)
8262 Mpi2EventIrConfigElement_t *element;
8265 char *reason_str = NULL, *element_str = NULL;
8267 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8269 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
8270 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
8271 "foreign" : "native",
8272 event_data->NumElements);
8273 for (i = 0; i < event_data->NumElements; i++, element++) {
8274 switch (element->ReasonCode) {
8275 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8278 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8279 reason_str = "remove";
8281 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
8282 reason_str = "no change";
8284 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8285 reason_str = "hide";
8287 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8288 reason_str = "unhide";
8290 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8291 reason_str = "volume_created";
8293 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8294 reason_str = "volume_deleted";
8296 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8297 reason_str = "pd_created";
8299 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8300 reason_str = "pd_deleted";
8303 reason_str = "unknown reason";
8306 element_type = le16_to_cpu(element->ElementFlags) &
8307 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
8308 switch (element_type) {
8309 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
8310 element_str = "volume";
8312 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
8313 element_str = "phys disk";
8315 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
8316 element_str = "hot spare";
8319 element_str = "unknown element";
8322 pr_info("\t(%s:%s), vol handle(0x%04x), " \
8323 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
8324 reason_str, le16_to_cpu(element->VolDevHandle),
8325 le16_to_cpu(element->PhysDiskDevHandle),
8326 element->PhysDiskNum);
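/*
 * Illustrative sketch, not part of the driver: the element type printed above
 * is simply a mask over the little-endian ElementFlags field.  The helper
 * name below is hypothetical and only restates that decode.
 */
static inline u16 _scsih_ir_element_type(Mpi2EventIrConfigElement_t *element)
{
return le16_to_cpu(element->ElementFlags) &
MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
}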
8331 * _scsih_sas_ir_config_change_event - handle ir configuration change events
8332 * @ioc: per adapter object
8333 * @fw_event: The fw_event_work object
8337 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
8338 struct fw_event_work *fw_event)
8340 Mpi2EventIrConfigElement_t *element;
8343 Mpi2EventDataIrConfigChangeList_t *event_data =
8344 (Mpi2EventDataIrConfigChangeList_t *)
8345 fw_event->event_data;
8347 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8348 (!ioc->hide_ir_msg))
8349 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
8351 foreign_config = (le32_to_cpu(event_data->Flags) &
8352 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
8354 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8355 if (ioc->shost_recovery &&
8356 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8357 for (i = 0; i < event_data->NumElements; i++, element++) {
8358 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8359 _scsih_ir_fastpath(ioc,
8360 le16_to_cpu(element->PhysDiskDevHandle),
8361 element->PhysDiskNum);
8366 for (i = 0; i < event_data->NumElements; i++, element++) {
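/*
 * Note on the mapping below: RC_HIDE and RC_PD_CREATED mean the disk has
 * become a hidden RAID component, so the driver starts tracking it as a
 * physical disk (_scsih_sas_pd_add / _scsih_sas_pd_hide); RC_UNHIDE and
 * RC_PD_DELETED mean the disk is no longer a RAID component, so that
 * tracking is torn down (_scsih_sas_pd_delete / _scsih_sas_pd_expose).
 */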
8368 switch (element->ReasonCode) {
8369 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8370 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8371 if (!foreign_config)
8372 _scsih_sas_volume_add(ioc, element);
8374 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8375 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8376 if (!foreign_config)
8377 _scsih_sas_volume_delete(ioc,
8378 le16_to_cpu(element->VolDevHandle));
8380 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8381 if (!ioc->is_warpdrive)
8382 _scsih_sas_pd_hide(ioc, element);
8384 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8385 if (!ioc->is_warpdrive)
8386 _scsih_sas_pd_expose(ioc, element);
8388 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8389 if (!ioc->is_warpdrive)
8390 _scsih_sas_pd_add(ioc, element);
8392 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8393 if (!ioc->is_warpdrive)
8394 _scsih_sas_pd_delete(ioc, element);
8401 * _scsih_sas_ir_volume_event - IR volume event
8402 * @ioc: per adapter object
8403 * @fw_event: The fw_event_work object
8407 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8408 struct fw_event_work *fw_event)
8411 unsigned long flags;
8412 struct _raid_device *raid_device;
8416 Mpi2EventDataIrVolume_t *event_data =
8417 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
8419 if (ioc->shost_recovery)
8422 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8425 handle = le16_to_cpu(event_data->VolDevHandle);
8426 state = le32_to_cpu(event_data->NewValue);
8427 if (!ioc->hide_ir_msg)
8429 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8431 le32_to_cpu(event_data->PreviousValue),
8434 case MPI2_RAID_VOL_STATE_MISSING:
8435 case MPI2_RAID_VOL_STATE_FAILED:
8436 _scsih_sas_volume_delete(ioc, handle);
8439 case MPI2_RAID_VOL_STATE_ONLINE:
8440 case MPI2_RAID_VOL_STATE_DEGRADED:
8441 case MPI2_RAID_VOL_STATE_OPTIMAL:
8443 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8444 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8445 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8450 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8452 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8453 __FILE__, __LINE__, __func__);
8457 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8459 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8460 __FILE__, __LINE__, __func__);
8464 raid_device->id = ioc->sas_id++;
8465 raid_device->channel = RAID_CHANNEL;
8466 raid_device->handle = handle;
8467 raid_device->wwid = wwid;
8468 _scsih_raid_device_add(ioc, raid_device);
8469 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8470 raid_device->id, 0);
8472 _scsih_raid_device_remove(ioc, raid_device);
8475 case MPI2_RAID_VOL_STATE_INITIALIZING:
8482 * _scsih_sas_ir_physical_disk_event - PD event
8483 * @ioc: per adapter object
8484 * @fw_event: The fw_event_work object
8488 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8489 struct fw_event_work *fw_event)
8491 u16 handle, parent_handle;
8493 struct _sas_device *sas_device;
8494 Mpi2ConfigReply_t mpi_reply;
8495 Mpi2SasDevicePage0_t sas_device_pg0;
8497 Mpi2EventDataIrPhysicalDisk_t *event_data =
8498 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8501 if (ioc->shost_recovery)
8504 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8507 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8508 state = le32_to_cpu(event_data->NewValue);
8510 if (!ioc->hide_ir_msg)
8512 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8514 le32_to_cpu(event_data->PreviousValue),
8518 case MPI2_RAID_PD_STATE_ONLINE:
8519 case MPI2_RAID_PD_STATE_DEGRADED:
8520 case MPI2_RAID_PD_STATE_REBUILDING:
8521 case MPI2_RAID_PD_STATE_OPTIMAL:
8522 case MPI2_RAID_PD_STATE_HOT_SPARE:
8524 if (!ioc->is_warpdrive)
8525 set_bit(handle, ioc->pd_handles);
8527 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8529 sas_device_put(sas_device);
8533 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8534 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8536 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8537 __FILE__, __LINE__, __func__);
8541 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8542 MPI2_IOCSTATUS_MASK;
8543 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8544 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8545 __FILE__, __LINE__, __func__);
8549 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8550 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8551 mpt3sas_transport_update_links(ioc, sas_address, handle,
8552 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8554 _scsih_add_device(ioc, handle, 0, 1);
8558 case MPI2_RAID_PD_STATE_OFFLINE:
8559 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8560 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8567 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8568 * @ioc: per adapter object
8569 * @event_data: event data payload
8573 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8574 Mpi2EventDataIrOperationStatus_t *event_data)
8576 char *reason_str = NULL;
8578 switch (event_data->RAIDOperation) {
8579 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8580 reason_str = "resync";
8582 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8583 reason_str = "online capacity expansion";
8585 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8586 reason_str = "consistency check";
8588 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8589 reason_str = "background init";
8591 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8592 reason_str = "make data consistent";
8599 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8601 le16_to_cpu(event_data->VolDevHandle),
8602 event_data->PercentComplete);
8606 * _scsih_sas_ir_operation_status_event - handle RAID operation events
8607 * @ioc: per adapter object
8608 * @fw_event: The fw_event_work object
8612 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8613 struct fw_event_work *fw_event)
8615 Mpi2EventDataIrOperationStatus_t *event_data =
8616 (Mpi2EventDataIrOperationStatus_t *)
8617 fw_event->event_data;
8618 static struct _raid_device *raid_device;
8619 unsigned long flags;
8622 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8623 (!ioc->hide_ir_msg))
8624 _scsih_sas_ir_operation_status_event_debug(ioc,
8627 /* code added for raid transport support */
8628 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8630 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8631 handle = le16_to_cpu(event_data->VolDevHandle);
8632 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8634 raid_device->percent_complete =
8635 event_data->PercentComplete;
8636 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8641 * _scsih_prep_device_scan - initialize parameters prior to device scan
8642 * @ioc: per adapter object
8644 * Set the deleted flag prior to device scan. If the device is found during
8645 * the scan, then we clear the deleted flag.
8648 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8650 struct MPT3SAS_DEVICE *sas_device_priv_data;
8651 struct scsi_device *sdev;
8653 shost_for_each_device(sdev, ioc->shost) {
8654 sas_device_priv_data = sdev->hostdata;
8655 if (sas_device_priv_data && sas_device_priv_data->sas_target)
8656 sas_device_priv_data->sas_target->deleted = 1;
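/*
 * Post-reset rediscovery is a mark-and-sweep: _scsih_prep_device_scan() marks
 * every target as deleted, the _scsih_search_responding_*() helpers below
 * clear that mark (and set ->responding) for devices the firmware still
 * reports, and _scsih_remove_unresponding_devices() then removes whatever was
 * not found again.  See mpt3sas_scsih_reset_done_handler() for the call order.
 */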
8661 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8662 * @ioc: per adapter object
8663 * @sas_device_pg0: SAS Device page 0
8665 * After host reset, find out whether devices are still responding.
8666 * Used in _scsih_remove_unresponding_devices.
8669 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8670 Mpi2SasDevicePage0_t *sas_device_pg0)
8672 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8673 struct scsi_target *starget;
8674 struct _sas_device *sas_device = NULL;
8675 struct _enclosure_node *enclosure_dev = NULL;
8676 unsigned long flags;
8678 if (sas_device_pg0->EnclosureHandle) {
8680 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8681 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8682 if (enclosure_dev == NULL)
8683 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8684 sas_device_pg0->EnclosureHandle);
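/*
 * Match on (SAS address, slot) rather than on the device handle: the firmware
 * may assign a different handle to the same device across a host reset, which
 * is why the stored handle is refreshed below when it no longer matches.
 */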
8686 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8687 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8688 if ((sas_device->sas_address == le64_to_cpu(
8689 sas_device_pg0->SASAddress)) && (sas_device->slot ==
8690 le16_to_cpu(sas_device_pg0->Slot))) {
8691 sas_device->responding = 1;
8692 starget = sas_device->starget;
8693 if (starget && starget->hostdata) {
8694 sas_target_priv_data = starget->hostdata;
8695 sas_target_priv_data->tm_busy = 0;
8696 sas_target_priv_data->deleted = 0;
8698 sas_target_priv_data = NULL;
8700 starget_printk(KERN_INFO, starget,
8701 "handle(0x%04x), sas_addr(0x%016llx)\n",
8702 le16_to_cpu(sas_device_pg0->DevHandle),
8703 (unsigned long long)
8704 sas_device->sas_address);
8706 if (sas_device->enclosure_handle != 0)
8707 starget_printk(KERN_INFO, starget,
8708 "enclosure logical id(0x%016llx),"
8710 (unsigned long long)
8711 sas_device->enclosure_logical_id,
8714 if (le16_to_cpu(sas_device_pg0->Flags) &
8715 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8716 sas_device->enclosure_level =
8717 sas_device_pg0->EnclosureLevel;
8718 memcpy(&sas_device->connector_name[0],
8719 &sas_device_pg0->ConnectorName[0], 4);
8721 sas_device->enclosure_level = 0;
8722 sas_device->connector_name[0] = '\0';
8725 sas_device->enclosure_handle =
8726 le16_to_cpu(sas_device_pg0->EnclosureHandle);
8727 sas_device->is_chassis_slot_valid = 0;
8728 if (enclosure_dev) {
8729 sas_device->enclosure_logical_id = le64_to_cpu(
8730 enclosure_dev->pg0.EnclosureLogicalID);
8731 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8732 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8733 sas_device->is_chassis_slot_valid = 1;
8734 sas_device->chassis_slot =
8735 enclosure_dev->pg0.ChassisSlot;
8739 if (sas_device->handle == le16_to_cpu(
8740 sas_device_pg0->DevHandle))
8742 pr_info("\thandle changed from(0x%04x)!!!\n",
8743 sas_device->handle);
8744 sas_device->handle = le16_to_cpu(
8745 sas_device_pg0->DevHandle);
8746 if (sas_target_priv_data)
8747 sas_target_priv_data->handle =
8748 le16_to_cpu(sas_device_pg0->DevHandle);
8753 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8757 * _scsih_create_enclosure_list_after_reset - free the existing enclosure
8758 * list and re-create it by scanning all Enclosure Page(0)s
8759 * @ioc: per adapter object
8762 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8764 struct _enclosure_node *enclosure_dev;
8765 Mpi2ConfigReply_t mpi_reply;
8766 u16 enclosure_handle;
8769 /* Free existing enclosure list */
8770 mpt3sas_free_enclosure_list(ioc);
8772 /* Reconstruct the enclosure list after reset */
8773 enclosure_handle = 0xFFFF;
8776 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8777 if (!enclosure_dev) {
8778 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8779 __FILE__, __LINE__, __func__);
8782 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8783 &enclosure_dev->pg0,
8784 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8787 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8788 MPI2_IOCSTATUS_MASK)) {
8789 kfree(enclosure_dev);
8792 list_add_tail(&enclosure_dev->list,
8793 &ioc->enclosure_list);
8795 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
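/*
 * The loop above uses the config-page iteration idiom found throughout this
 * file: start from handle 0xFFFF, request the "next" page with the
 * GET_NEXT_HANDLE form, feed the handle returned in each page back into the
 * next request, and stop once the request fails or the masked IOCStatus is
 * not MPI2_IOCSTATUS_SUCCESS.
 */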
8800 * _scsih_search_responding_sas_devices - search for responding SAS end devices
8801 * @ioc: per adapter object
8803 * After host reset, find out whether devices are still responding.
8807 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8809 Mpi2SasDevicePage0_t sas_device_pg0;
8810 Mpi2ConfigReply_t mpi_reply;
8815 ioc_info(ioc, "search for end-devices: start\n");
8817 if (list_empty(&ioc->sas_device_list))
8821 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8822 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8824 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8825 MPI2_IOCSTATUS_MASK;
8826 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8828 handle = le16_to_cpu(sas_device_pg0.DevHandle);
8829 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8830 if (!(_scsih_is_end_device(device_info)))
8832 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8836 ioc_info(ioc, "search for end-devices: complete\n");
8840 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8841 * @ioc: per adapter object
8842 * @pcie_device_pg0: PCIe Device page 0
8844 * After host reset, find out whether devices are still responding.
8845 * Used in _scsih_remove_unresponding_devices.
8848 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8849 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8851 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8852 struct scsi_target *starget;
8853 struct _pcie_device *pcie_device;
8854 unsigned long flags;
8856 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8857 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8858 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8859 && (pcie_device->slot == le16_to_cpu(
8860 pcie_device_pg0->Slot))) {
8861 pcie_device->access_status =
8862 pcie_device_pg0->AccessStatus;
8863 pcie_device->responding = 1;
8864 starget = pcie_device->starget;
8865 if (starget && starget->hostdata) {
8866 sas_target_priv_data = starget->hostdata;
8867 sas_target_priv_data->tm_busy = 0;
8868 sas_target_priv_data->deleted = 0;
8870 sas_target_priv_data = NULL;
8872 starget_printk(KERN_INFO, starget,
8873 "handle(0x%04x), wwid(0x%016llx) ",
8874 pcie_device->handle,
8875 (unsigned long long)pcie_device->wwid);
8876 if (pcie_device->enclosure_handle != 0)
8877 starget_printk(KERN_INFO, starget,
8878 "enclosure logical id(0x%016llx), "
8880 (unsigned long long)
8881 pcie_device->enclosure_logical_id,
8885 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8886 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8887 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8888 pcie_device->enclosure_level =
8889 pcie_device_pg0->EnclosureLevel;
8890 memcpy(&pcie_device->connector_name[0],
8891 &pcie_device_pg0->ConnectorName[0], 4);
8893 pcie_device->enclosure_level = 0;
8894 pcie_device->connector_name[0] = '\0';
8897 if (pcie_device->handle == le16_to_cpu(
8898 pcie_device_pg0->DevHandle))
8900 pr_info("\thandle changed from(0x%04x)!!!\n",
8901 pcie_device->handle);
8902 pcie_device->handle = le16_to_cpu(
8903 pcie_device_pg0->DevHandle);
8904 if (sas_target_priv_data)
8905 sas_target_priv_data->handle =
8906 le16_to_cpu(pcie_device_pg0->DevHandle);
8912 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8916 * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
8917 * @ioc: per adapter object
8919 * After host reset, find out whether devices are still responding.
8923 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8925 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8926 Mpi2ConfigReply_t mpi_reply;
8931 ioc_info(ioc, "search for end-devices: start\n");
8933 if (list_empty(&ioc->pcie_device_list))
8937 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8938 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8940 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8941 MPI2_IOCSTATUS_MASK;
8942 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8943 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8944 __func__, ioc_status,
8945 le32_to_cpu(mpi_reply.IOCLogInfo));
8948 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8949 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8950 if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8952 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8955 ioc_info(ioc, "search for PCIe end-devices: complete\n");
8959 * _scsih_mark_responding_raid_device - mark a raid_device as responding
8960 * @ioc: per adapter object
8961 * @wwid: world wide identifier for raid volume
8962 * @handle: device handle
8964 * After host reset, find out whether devices are still responding.
8965 * Used in _scsih_remove_unresponding_devices.
8968 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8971 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8972 struct scsi_target *starget;
8973 struct _raid_device *raid_device;
8974 unsigned long flags;
8976 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8977 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8978 if (raid_device->wwid == wwid && raid_device->starget) {
8979 starget = raid_device->starget;
8980 if (starget && starget->hostdata) {
8981 sas_target_priv_data = starget->hostdata;
8982 sas_target_priv_data->deleted = 0;
8984 sas_target_priv_data = NULL;
8985 raid_device->responding = 1;
8986 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8987 starget_printk(KERN_INFO, raid_device->starget,
8988 "handle(0x%04x), wwid(0x%016llx)\n", handle,
8989 (unsigned long long)raid_device->wwid);
8992 * WARPDRIVE: The handles of the PDs might have changed
8993 * across the host reset so re-initialize the
8994 * required data for Direct IO
8996 mpt3sas_init_warpdrive_properties(ioc, raid_device);
8997 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8998 if (raid_device->handle == handle) {
8999 spin_unlock_irqrestore(&ioc->raid_device_lock,
9003 pr_info("\thandle changed from(0x%04x)!!!\n",
9004 raid_device->handle);
9005 raid_device->handle = handle;
9006 if (sas_target_priv_data)
9007 sas_target_priv_data->handle = handle;
9008 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9012 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9016 * _scsih_search_responding_raid_devices - search for responding RAID volumes
9017 * @ioc: per adapter object
9019 * After host reset, find out whether devices are still responding.
9023 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
9025 Mpi2RaidVolPage1_t volume_pg1;
9026 Mpi2RaidVolPage0_t volume_pg0;
9027 Mpi2RaidPhysDiskPage0_t pd_pg0;
9028 Mpi2ConfigReply_t mpi_reply;
9033 if (!ioc->ir_firmware)
9036 ioc_info(ioc, "search for raid volumes: start\n");
9038 if (list_empty(&ioc->raid_device_list))
9042 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9043 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9044 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9045 MPI2_IOCSTATUS_MASK;
9046 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9048 handle = le16_to_cpu(volume_pg1.DevHandle);
9050 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9051 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9052 sizeof(Mpi2RaidVolPage0_t)))
9055 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9056 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9057 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
9058 _scsih_mark_responding_raid_device(ioc,
9059 le64_to_cpu(volume_pg1.WWID), handle);
9062 /* refresh the pd_handles */
9063 if (!ioc->is_warpdrive) {
9064 phys_disk_num = 0xFF;
9065 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
9066 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9067 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9069 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9070 MPI2_IOCSTATUS_MASK;
9071 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9073 phys_disk_num = pd_pg0.PhysDiskNum;
9074 handle = le16_to_cpu(pd_pg0.DevHandle);
9075 set_bit(handle, ioc->pd_handles);
9079 ioc_info(ioc, "search for responding raid volumes: complete\n");
9083 * _scsih_mark_responding_expander - mark an expander as responding
9084 * @ioc: per adapter object
9085 * @expander_pg0:SAS Expander Config Page0
9087 * After host reset, find out whether devices are still responding.
9088 * Used in _scsih_remove_unresponding_devices.
9091 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
9092 Mpi2ExpanderPage0_t *expander_pg0)
9094 struct _sas_node *sas_expander = NULL;
9095 unsigned long flags;
9097 struct _enclosure_node *enclosure_dev = NULL;
9098 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
9099 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
9100 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
9102 if (enclosure_handle)
9104 mpt3sas_scsih_enclosure_find_by_handle(ioc,
9107 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9108 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
9109 if (sas_expander->sas_address != sas_address)
9111 sas_expander->responding = 1;
9113 if (enclosure_dev) {
9114 sas_expander->enclosure_logical_id =
9115 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
9116 sas_expander->enclosure_handle =
9117 le16_to_cpu(expander_pg0->EnclosureHandle);
9120 if (sas_expander->handle == handle)
9122 pr_info("\texpander(0x%016llx): handle changed" \
9123 " from(0x%04x) to (0x%04x)!!!\n",
9124 (unsigned long long)sas_expander->sas_address,
9125 sas_expander->handle, handle);
9126 sas_expander->handle = handle;
9127 for (i = 0 ; i < sas_expander->num_phys ; i++)
9128 sas_expander->phy[i].handle = handle;
9132 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9136 * _scsih_search_responding_expanders - search for responding expanders
9137 * @ioc: per adapter object
9139 * After host reset, find out whether devices are still responding.
9143 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
9145 Mpi2ExpanderPage0_t expander_pg0;
9146 Mpi2ConfigReply_t mpi_reply;
9151 ioc_info(ioc, "search for expanders: start\n");
9153 if (list_empty(&ioc->sas_expander_list))
9157 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9158 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9160 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9161 MPI2_IOCSTATUS_MASK;
9162 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9165 handle = le16_to_cpu(expander_pg0.DevHandle);
9166 sas_address = le64_to_cpu(expander_pg0.SASAddress);
9167 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
9169 (unsigned long long)sas_address);
9170 _scsih_mark_responding_expander(ioc, &expander_pg0);
9174 ioc_info(ioc, "search for expanders: complete\n");
9178 * _scsih_remove_unresponding_devices - remove devices that are no longer responding
9179 * @ioc: per adapter object
9182 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
9184 struct _sas_device *sas_device, *sas_device_next;
9185 struct _sas_node *sas_expander, *sas_expander_next;
9186 struct _raid_device *raid_device, *raid_device_next;
9187 struct _pcie_device *pcie_device, *pcie_device_next;
9188 struct list_head tmp_list;
9189 unsigned long flags;
9192 ioc_info(ioc, "removing unresponding devices: start\n");
9194 /* removing unresponding end devices */
9195 ioc_info(ioc, "removing unresponding devices: end-devices\n");
9197 * Iterate, pulling off devices marked as non-responding. We become the
9198 * owner for the reference the list had on any object we prune.
9200 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9201 list_for_each_entry_safe(sas_device, sas_device_next,
9202 &ioc->sas_device_list, list) {
9203 if (!sas_device->responding)
9204 list_move_tail(&sas_device->list, &head);
9206 sas_device->responding = 0;
9208 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9211 * Now, uninitialize and remove the unresponding devices we pruned.
9213 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
9214 _scsih_remove_device(ioc, sas_device);
9215 list_del_init(&sas_device->list);
9216 sas_device_put(sas_device);
9219 ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
9220 INIT_LIST_HEAD(&head);
9221 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9222 list_for_each_entry_safe(pcie_device, pcie_device_next,
9223 &ioc->pcie_device_list, list) {
9224 if (!pcie_device->responding)
9225 list_move_tail(&pcie_device->list, &head);
9227 pcie_device->responding = 0;
9229 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9231 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
9232 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9233 list_del_init(&pcie_device->list);
9234 pcie_device_put(pcie_device);
9237 /* removing unresponding volumes */
9238 if (ioc->ir_firmware) {
9239 ioc_info(ioc, "removing unresponding devices: volumes\n");
9240 list_for_each_entry_safe(raid_device, raid_device_next,
9241 &ioc->raid_device_list, list) {
9242 if (!raid_device->responding)
9243 _scsih_sas_volume_delete(ioc,
9244 raid_device->handle);
9246 raid_device->responding = 0;
9250 /* removing unresponding expanders */
9251 ioc_info(ioc, "removing unresponding devices: expanders\n");
9252 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9253 INIT_LIST_HEAD(&tmp_list);
9254 list_for_each_entry_safe(sas_expander, sas_expander_next,
9255 &ioc->sas_expander_list, list) {
9256 if (!sas_expander->responding)
9257 list_move_tail(&sas_expander->list, &tmp_list);
9259 sas_expander->responding = 0;
9261 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9262 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
9264 _scsih_expander_node_remove(ioc, sas_expander);
9267 ioc_info(ioc, "removing unresponding devices: complete\n");
9269 /* unblock devices */
9270 _scsih_ublock_io_all_device(ioc);
9274 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
9275 struct _sas_node *sas_expander, u16 handle)
9277 Mpi2ExpanderPage1_t expander_pg1;
9278 Mpi2ConfigReply_t mpi_reply;
9281 for (i = 0 ; i < sas_expander->num_phys ; i++) {
9282 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
9283 &expander_pg1, i, handle))) {
9284 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9285 __FILE__, __LINE__, __func__);
9289 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
9290 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
9291 expander_pg1.NegotiatedLinkRate >> 4);
9296 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
9297 * @ioc: per adapter object
9300 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
9302 Mpi2ExpanderPage0_t expander_pg0;
9303 Mpi2SasDevicePage0_t sas_device_pg0;
9304 Mpi26PCIeDevicePage0_t pcie_device_pg0;
9305 Mpi2RaidVolPage1_t volume_pg1;
9306 Mpi2RaidVolPage0_t volume_pg0;
9307 Mpi2RaidPhysDiskPage0_t pd_pg0;
9308 Mpi2EventIrConfigElement_t element;
9309 Mpi2ConfigReply_t mpi_reply;
9312 u16 handle, parent_handle;
9314 struct _sas_device *sas_device;
9315 struct _pcie_device *pcie_device;
9316 struct _sas_node *expander_device;
9317 static struct _raid_device *raid_device;
9319 unsigned long flags;
9321 ioc_info(ioc, "scan devices: start\n");
9323 _scsih_sas_host_refresh(ioc);
9325 ioc_info(ioc, "\tscan devices: expanders start\n");
9329 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
9330 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
9331 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9332 MPI2_IOCSTATUS_MASK;
9333 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9334 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9335 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9338 handle = le16_to_cpu(expander_pg0.DevHandle);
9339 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9340 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
9341 ioc, le64_to_cpu(expander_pg0.SASAddress));
9342 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9343 if (expander_device)
9344 _scsih_refresh_expander_links(ioc, expander_device,
9347 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9349 (u64)le64_to_cpu(expander_pg0.SASAddress));
9350 _scsih_expander_add(ioc, handle);
9351 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
9353 (u64)le64_to_cpu(expander_pg0.SASAddress));
9357 ioc_info(ioc, "\tscan devices: expanders complete\n");
9359 if (!ioc->ir_firmware)
9362 ioc_info(ioc, "\tscan devices: phys disk start\n");
9365 phys_disk_num = 0xFF;
9366 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9367 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9369 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9370 MPI2_IOCSTATUS_MASK;
9371 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9372 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9373 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9376 phys_disk_num = pd_pg0.PhysDiskNum;
9377 handle = le16_to_cpu(pd_pg0.DevHandle);
9378 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9380 sas_device_put(sas_device);
9383 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9384 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9387 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9388 MPI2_IOCSTATUS_MASK;
9389 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9390 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9391 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9394 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9395 if (!_scsih_get_sas_address(ioc, parent_handle,
9397 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9399 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9400 mpt3sas_transport_update_links(ioc, sas_address,
9401 handle, sas_device_pg0.PhyNum,
9402 MPI2_SAS_NEG_LINK_RATE_1_5);
9403 set_bit(handle, ioc->pd_handles);
9405 /* This will retry adding the end device.
9406 * _scsih_add_device() will decide on retries and
9407 * return "1" when it should be retried
9409 while (_scsih_add_device(ioc, handle, retry_count++,
9413 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9415 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9419 ioc_info(ioc, "\tscan devices: phys disk complete\n");
9421 ioc_info(ioc, "\tscan devices: volumes start\n");
9425 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9426 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9427 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9428 MPI2_IOCSTATUS_MASK;
9429 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9430 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9431 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9434 handle = le16_to_cpu(volume_pg1.DevHandle);
9435 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9436 raid_device = _scsih_raid_device_find_by_wwid(ioc,
9437 le64_to_cpu(volume_pg1.WWID));
9438 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9441 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9442 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9443 sizeof(Mpi2RaidVolPage0_t)))
9445 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9446 MPI2_IOCSTATUS_MASK;
9447 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9448 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9449 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9452 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9453 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9454 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9455 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9456 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9457 element.VolDevHandle = volume_pg1.DevHandle;
9458 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9459 volume_pg1.DevHandle);
9460 _scsih_sas_volume_add(ioc, &element);
9461 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9462 volume_pg1.DevHandle);
9466 ioc_info(ioc, "\tscan devices: volumes complete\n");
9470 ioc_info(ioc, "\tscan devices: end devices start\n");
9474 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9475 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9477 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9478 MPI2_IOCSTATUS_MASK;
9479 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9480 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9481 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9484 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9485 if (!(_scsih_is_end_device(
9486 le32_to_cpu(sas_device_pg0.DeviceInfo))))
9488 sas_device = mpt3sas_get_sdev_by_addr(ioc,
9489 le64_to_cpu(sas_device_pg0.SASAddress));
9491 sas_device_put(sas_device);
9494 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9495 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9496 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9498 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9499 mpt3sas_transport_update_links(ioc, sas_address, handle,
9500 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9502 /* This will retry adding the end device.
9503 * _scsih_add_device() will decide on retries and
9504 * return "1" when it should be retried
9506 while (_scsih_add_device(ioc, handle, retry_count++,
9510 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9512 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9515 ioc_info(ioc, "\tscan devices: end devices complete\n");
9516 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9520 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9521 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9523 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9524 & MPI2_IOCSTATUS_MASK;
9525 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9526 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9527 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9530 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9531 if (!(_scsih_is_nvme_pciescsi_device(
9532 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9534 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9535 le64_to_cpu(pcie_device_pg0.WWID));
9537 pcie_device_put(pcie_device);
9541 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9542 _scsih_pcie_add_device(ioc, handle);
9544 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9545 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9547 ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
9548 ioc_info(ioc, "scan devices: complete\n");
9552 * mpt3sas_scsih_pre_reset_handler - pre reset callback handler (for scsih)
9553 * @ioc: per adapter object
9555 * The handler for doing any required cleanup or initialization.
9557 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9559 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9563 * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding scsi & tm commands
9565 * @ioc: per adapter object
9567 * The handler for doing any required cleanup or initialization.
9570 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
9573 ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
9574 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9575 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9576 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9577 complete(&ioc->scsih_cmds.done);
9579 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9580 ioc->tm_cmds.status |= MPT3_CMD_RESET;
9581 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9582 complete(&ioc->tm_cmds.done);
9585 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9586 memset(ioc->device_remove_in_progress, 0,
9587 ioc->device_remove_in_progress_sz);
9588 _scsih_fw_event_cleanup_queue(ioc);
9589 _scsih_flush_running_cmds(ioc);
9593 * mpt3sas_scsih_reset_done_handler - reset done callback handler (for scsih)
9594 * @ioc: per adapter object
9596 * The handler for doing any required cleanup or initialization.
9599 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9601 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9602 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9603 !ioc->sas_hba.num_phys)) {
9604 _scsih_prep_device_scan(ioc);
9605 _scsih_create_enclosure_list_after_reset(ioc);
9606 _scsih_search_responding_sas_devices(ioc);
9607 _scsih_search_responding_pcie_devices(ioc);
9608 _scsih_search_responding_raid_devices(ioc);
9609 _scsih_search_responding_expanders(ioc);
9610 _scsih_error_recovery_delete_devices(ioc);
9615 * _mpt3sas_fw_work - delayed task for processing firmware events
9616 * @ioc: per adapter object
9617 * @fw_event: The fw_event_work object
9621 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9623 ioc->current_event = fw_event;
9624 _scsih_fw_event_del_from_list(ioc, fw_event);
9626 /* the queue is being flushed so ignore this event */
9627 if (ioc->remove_host || ioc->pci_error_recovery) {
9628 fw_event_work_put(fw_event);
9629 ioc->current_event = NULL;
9633 switch (fw_event->event) {
9634 case MPT3SAS_PROCESS_TRIGGER_DIAG:
9635 mpt3sas_process_trigger_data(ioc,
9636 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9637 fw_event->event_data);
9639 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9640 while (scsi_host_in_recovery(ioc->shost) ||
9641 ioc->shost_recovery) {
9643 * If we're unloading or cancelling the work, bail.
9644 * Otherwise, this can become an infinite loop.
9646 if (ioc->remove_host || ioc->fw_events_cleanup)
9650 _scsih_remove_unresponding_devices(ioc);
9651 _scsih_scan_for_devices_after_reset(ioc);
9652 _scsih_set_nvme_max_shutdown_latency(ioc);
9654 case MPT3SAS_PORT_ENABLE_COMPLETE:
9655 ioc->start_scan = 0;
9656 if (missing_delay[0] != -1 && missing_delay[1] != -1)
9657 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9660 ioc_info(ioc, "port enable: complete from worker thread\n"));
9662 case MPT3SAS_TURN_ON_PFA_LED:
9663 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9665 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9666 _scsih_sas_topology_change_event(ioc, fw_event);
9668 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9669 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
9670 _scsih_sas_device_status_change_event_debug(ioc,
9671 (Mpi2EventDataSasDeviceStatusChange_t *)
9672 fw_event->event_data);
9674 case MPI2_EVENT_SAS_DISCOVERY:
9675 _scsih_sas_discovery_event(ioc, fw_event);
9677 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9678 _scsih_sas_device_discovery_error_event(ioc, fw_event);
9680 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9681 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
9683 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9684 _scsih_sas_enclosure_dev_status_change_event(ioc,
9687 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9688 _scsih_sas_ir_config_change_event(ioc, fw_event);
9690 case MPI2_EVENT_IR_VOLUME:
9691 _scsih_sas_ir_volume_event(ioc, fw_event);
9693 case MPI2_EVENT_IR_PHYSICAL_DISK:
9694 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
9696 case MPI2_EVENT_IR_OPERATION_STATUS:
9697 _scsih_sas_ir_operation_status_event(ioc, fw_event);
9699 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9700 _scsih_pcie_device_status_change_event(ioc, fw_event);
9702 case MPI2_EVENT_PCIE_ENUMERATION:
9703 _scsih_pcie_enumeration_event(ioc, fw_event);
9705 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9706 _scsih_pcie_topology_change_event(ioc, fw_event);
9707 ioc->current_event = NULL;
9712 fw_event_work_put(fw_event);
9713 ioc->current_event = NULL;
9717 * _firmware_event_work - work queue wrapper for processing firmware events
9718 * @work: The fw_event_work object
9721 * wrapper for the work thread handling firmware events
9725 _firmware_event_work(struct work_struct *work)
9727 struct fw_event_work *fw_event = container_of(work,
9728 struct fw_event_work, work);
9730 _mpt3sas_fw_work(fw_event->ioc, fw_event);
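/*
 * Firmware events are handled in two stages: mpt3sas_scsih_event_callback()
 * below runs in interrupt context, copies the event payload into a
 * struct fw_event_work and queues it on ioc->firmware_event_thread; the
 * queued work then runs here in process context via _mpt3sas_fw_work().
 */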
9734 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9735 * @ioc: per adapter object
9736 * @msix_index: MSIX table index supplied by the OS
9737 * @reply: reply message frame(lower 32bit addr)
9738 * Context: interrupt.
9740 * This function merely adds a new work task into ioc->firmware_event_thread.
9741 * The tasks are worked from _firmware_event_work in user context.
9743 * Return: 1 meaning mf should be freed from _base_interrupt
9744 * 0 means the mf is freed from this function.
9747 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9750 struct fw_event_work *fw_event;
9751 Mpi2EventNotificationReply_t *mpi_reply;
9754 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9756 /* events turned off due to host reset or pci_error_recovery */
9757 if (ioc->pci_error_recovery)
9760 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9762 if (unlikely(!mpi_reply)) {
9763 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9764 __FILE__, __LINE__, __func__);
9768 event = le16_to_cpu(mpi_reply->Event);
9770 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9771 mpt3sas_trigger_event(ioc, event, 0);
9775 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9777 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9778 (Mpi2EventDataSasBroadcastPrimitive_t *)
9779 mpi_reply->EventData;
9781 if (baen_data->Primitive !=
9782 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9785 if (ioc->broadcast_aen_busy) {
9786 ioc->broadcast_aen_pending++;
9789 ioc->broadcast_aen_busy = 1;
9793 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9794 _scsih_check_topo_delete_events(ioc,
9795 (Mpi2EventDataSasTopologyChangeList_t *)
9796 mpi_reply->EventData);
9798 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9799 _scsih_check_pcie_topo_remove_events(ioc,
9800 (Mpi26EventDataPCIeTopologyChangeList_t *)
9801 mpi_reply->EventData);
9803 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9804 _scsih_check_ir_config_unhide_events(ioc,
9805 (Mpi2EventDataIrConfigChangeList_t *)
9806 mpi_reply->EventData);
9808 case MPI2_EVENT_IR_VOLUME:
9809 _scsih_check_volume_delete_events(ioc,
9810 (Mpi2EventDataIrVolume_t *)
9811 mpi_reply->EventData);
9813 case MPI2_EVENT_LOG_ENTRY_ADDED:
9815 Mpi2EventDataLogEntryAdded_t *log_entry;
9818 if (!ioc->is_warpdrive)
9821 log_entry = (Mpi2EventDataLogEntryAdded_t *)
9822 mpi_reply->EventData;
9823 log_code = (u32 *)log_entry->LogData;
9825 if (le16_to_cpu(log_entry->LogEntryQualifier)
9826 != MPT2_WARPDRIVE_LOGENTRY)
9829 switch (le32_to_cpu(*log_code)) {
9830 case MPT2_WARPDRIVE_LC_SSDT:
9831 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9833 case MPT2_WARPDRIVE_LC_SSDLW:
9834 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9836 case MPT2_WARPDRIVE_LC_SSDLF:
9837 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9839 case MPT2_WARPDRIVE_LC_BRMF:
9840 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9846 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9847 _scsih_sas_device_status_change_event(ioc,
9848 (Mpi2EventDataSasDeviceStatusChange_t *)
9849 mpi_reply->EventData);
9851 case MPI2_EVENT_IR_OPERATION_STATUS:
9852 case MPI2_EVENT_SAS_DISCOVERY:
9853 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9854 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9855 case MPI2_EVENT_IR_PHYSICAL_DISK:
9856 case MPI2_EVENT_PCIE_ENUMERATION:
9857 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9860 case MPI2_EVENT_TEMP_THRESHOLD:
9861 _scsih_temp_threshold_events(ioc,
9862 (Mpi2EventDataTemperature_t *)
9863 mpi_reply->EventData);
9865 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9866 ActiveCableEventData =
9867 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9868 switch (ActiveCableEventData->ReasonCode) {
9869 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9870 ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
9871 ActiveCableEventData->ReceptacleID);
9872 pr_notice("cannot be powered and devices connected\n");
9873 pr_notice("to this active cable will not be seen\n");
9874 pr_notice("This active cable requires %d mW of power\n",
9875 ActiveCableEventData->ActiveCablePowerRequirement);
9878 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9879 ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
9880 ActiveCableEventData->ReceptacleID);
9882 "is not running at optimal speed(12 Gb/s rate)\n");
9888 default: /* ignore the rest */
9892 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9893 fw_event = alloc_fw_event_work(sz);
9895 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9896 __FILE__, __LINE__, __func__);
9900 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9901 fw_event->ioc = ioc;
9902 fw_event->VF_ID = mpi_reply->VF_ID;
9903 fw_event->VP_ID = mpi_reply->VP_ID;
9904 fw_event->event = event;
9905 _scsih_fw_event_add(ioc, fw_event);
9906 fw_event_work_put(fw_event);
9911 * _scsih_expander_node_remove - remove an expander device from the list
9912 * @ioc: per adapter object
9913 * @sas_expander: the sas_node object
9915 * Remove the object from the ioc->sas_expander_list and free the
9916 * associated memory.
9919 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9920 struct _sas_node *sas_expander)
9922 struct _sas_port *mpt3sas_port, *next;
9923 unsigned long flags;
9925 /* remove sibling ports attached to this expander */
9926 list_for_each_entry_safe(mpt3sas_port, next,
9927 &sas_expander->sas_port_list, port_list) {
9928 if (ioc->shost_recovery)
9930 if (mpt3sas_port->remote_identify.device_type ==
9932 mpt3sas_device_remove_by_sas_address(ioc,
9933 mpt3sas_port->remote_identify.sas_address);
9934 else if (mpt3sas_port->remote_identify.device_type ==
9935 SAS_EDGE_EXPANDER_DEVICE ||
9936 mpt3sas_port->remote_identify.device_type ==
9937 SAS_FANOUT_EXPANDER_DEVICE)
9938 mpt3sas_expander_remove(ioc,
9939 mpt3sas_port->remote_identify.sas_address);
9942 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9943 sas_expander->sas_address_parent);
9945 ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9946 sas_expander->handle, (unsigned long long)
9947 sas_expander->sas_address);
9949 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9950 list_del(&sas_expander->list);
9951 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9953 kfree(sas_expander->phy);
9954 kfree(sas_expander);
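/*
 * The two shutdown helpers below share the ioc->scsih_cmds handshake: take
 * scsih_cmds.mutex, verify the status is MPT3_CMD_NOT_USED, mark it
 * MPT3_CMD_PENDING, build the request in the frame of the allocated smid,
 * send it with put_smid_default() and wait on scsih_cmds.done before setting
 * the status back to MPT3_CMD_NOT_USED.
 */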
9958 * _scsih_nvme_shutdown - NVMe shutdown notification
9959 * @ioc: per adapter object
9961 * Sending an IO Unit Control request with the shutdown operation code to
9962 * alert the IOC that the host system is shutting down, so that the IOC can
9963 * issue an NVMe shutdown to the NVMe drives attached to it.
9966 _scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
9968 Mpi26IoUnitControlRequest_t *mpi_request;
9969 Mpi26IoUnitControlReply_t *mpi_reply;
9972 /* are there any NVMe devices ? */
9973 if (list_empty(&ioc->pcie_device_list))
9976 mutex_lock(&ioc->scsih_cmds.mutex);
9978 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9979 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9983 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9985 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9988 "%s: failed obtaining a smid\n", __func__);
9989 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9993 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9994 ioc->scsih_cmds.smid = smid;
9995 memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
9996 mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
9997 mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
9999 init_completion(&ioc->scsih_cmds.done);
10000 ioc->put_smid_default(ioc, smid);
10001 /* Wait for max_shutdown_latency seconds */
10003 "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
10004 ioc->max_shutdown_latency);
10005 wait_for_completion_timeout(&ioc->scsih_cmds.done,
10006 ioc->max_shutdown_latency*HZ);
10008 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10009 ioc_err(ioc, "%s: timeout\n", __func__);
10013 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10014 mpi_reply = ioc->scsih_cmds.reply;
10015 ioc_info(ioc, "Io Unit Control shutdown (complete):"
10016 "ioc_status(0x%04x), loginfo(0x%08x)\n",
10017 le16_to_cpu(mpi_reply->IOCStatus),
10018 le32_to_cpu(mpi_reply->IOCLogInfo));
10021 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10022 mutex_unlock(&ioc->scsih_cmds.mutex);
10027 * _scsih_ir_shutdown - IR shutdown notification
10028 * @ioc: per adapter object
10030 * Sending a RAID Action request to alert the Integrated RAID subsystem of
10031 * the IOC that the host system is shutting down.
10034 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
10036 Mpi2RaidActionRequest_t *mpi_request;
10037 Mpi2RaidActionReply_t *mpi_reply;
10040 /* is IR firmware build loaded ? */
10041 if (!ioc->ir_firmware)
10044 /* are there any volumes ? */
10045 if (list_empty(&ioc->raid_device_list))
10048 mutex_lock(&ioc->scsih_cmds.mutex);
10050 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
10051 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
10054 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
10056 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
10058 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
10059 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10063 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
10064 ioc->scsih_cmds.smid = smid;
10065 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
10067 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
10068 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
10070 if (!ioc->hide_ir_msg)
10071 ioc_info(ioc, "IR shutdown (sending)\n");
10072 init_completion(&ioc->scsih_cmds.done);
10073 ioc->put_smid_default(ioc, smid);
10074 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
10076 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
10077 ioc_err(ioc, "%s: timeout\n", __func__);
10081 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
10082 mpi_reply = ioc->scsih_cmds.reply;
10083 if (!ioc->hide_ir_msg)
10084 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
10085 le16_to_cpu(mpi_reply->IOCStatus),
10086 le32_to_cpu(mpi_reply->IOCLogInfo));
10090 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
10091 mutex_unlock(&ioc->scsih_cmds.mutex);
10095 * _scsih_get_shost_and_ioc - get shost and ioc
10096 * and verify whether they are NULL or not
10097 * @pdev: PCI device struct
10098 * @shost: address of scsi host pointer
10099 * @ioc: address of HBA adapter pointer
10101 * Return zero if *shost and *ioc are not NULL, otherwise return an error number.
10104 _scsih_get_shost_and_ioc(struct pci_dev *pdev,
10105 struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
10107 *shost = pci_get_drvdata(pdev);
10108 if (*shost == NULL) {
10109 dev_err(&pdev->dev, "pdev's driver data is null\n");
10113 *ioc = shost_priv(*shost);
10114 if (*ioc == NULL) {
10115 dev_err(&pdev->dev, "shost's private data is null\n");
10123 * scsih_remove - detach and remove the host
10124 * @pdev: PCI device struct
10126 * Routine called when unloading the driver.
10128 static void scsih_remove(struct pci_dev *pdev)
10130 struct Scsi_Host *shost;
10131 struct MPT3SAS_ADAPTER *ioc;
10132 struct _sas_port *mpt3sas_port, *next_port;
10133 struct _raid_device *raid_device, *next;
10134 struct MPT3SAS_TARGET *sas_target_priv_data;
10135 struct _pcie_device *pcie_device, *pcienext;
10136 struct workqueue_struct *wq;
10137 unsigned long flags;
10138 Mpi2ConfigReply_t mpi_reply;
10140 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
10143 ioc->remove_host = 1;
10145 if (!pci_device_is_present(pdev))
10146 _scsih_flush_running_cmds(ioc);
10148 _scsih_fw_event_cleanup_queue(ioc);
10150 spin_lock_irqsave(&ioc->fw_event_lock, flags);
10151 wq = ioc->firmware_event_thread;
10152 ioc->firmware_event_thread = NULL;
10153 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
10155 destroy_workqueue(wq);
10157 * Copy back the unmodified ioc page1 so that on the next driver load,
10158 * the current modifications to ioc page1 won't take effect.
10160 if (ioc->is_aero_ioc)
10161 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
10162 &ioc->ioc_pg1_copy);
10163 /* release all the volumes */
10164 _scsih_ir_shutdown(ioc);
10165 mpt3sas_destroy_debugfs(ioc);
10166 sas_remove_host(shost);
10167 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
10169 if (raid_device->starget) {
10170 sas_target_priv_data =
10171 raid_device->starget->hostdata;
10172 sas_target_priv_data->deleted = 1;
10173 scsi_remove_target(&raid_device->starget->dev);
10175 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
10176 raid_device->handle, (u64)raid_device->wwid);
10177 _scsih_raid_device_remove(ioc, raid_device);
10179 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
10181 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
10182 list_del_init(&pcie_device->list);
10183 pcie_device_put(pcie_device);
10186 /* free ports attached to the sas_host */
10187 list_for_each_entry_safe(mpt3sas_port, next_port,
10188 &ioc->sas_hba.sas_port_list, port_list) {
10189 if (mpt3sas_port->remote_identify.device_type ==
10191 mpt3sas_device_remove_by_sas_address(ioc,
10192 mpt3sas_port->remote_identify.sas_address);
10193 else if (mpt3sas_port->remote_identify.device_type ==
10194 SAS_EDGE_EXPANDER_DEVICE ||
10195 mpt3sas_port->remote_identify.device_type ==
10196 SAS_FANOUT_EXPANDER_DEVICE)
10197 mpt3sas_expander_remove(ioc,
10198 mpt3sas_port->remote_identify.sas_address);
10201 /* free phys attached to the sas_host */
10202 if (ioc->sas_hba.num_phys) {
10203 kfree(ioc->sas_hba.phy);
10204 ioc->sas_hba.phy = NULL;
10205 ioc->sas_hba.num_phys = 0;
10208 mpt3sas_base_detach(ioc);
10209 spin_lock(&gioc_lock);
10210 list_del(&ioc->list);
10211 spin_unlock(&gioc_lock);
10212 scsi_host_put(shost);
10216 * scsih_shutdown - routine called during system shutdown
10217 * @pdev: PCI device struct
10220 scsih_shutdown(struct pci_dev *pdev)
10222 struct Scsi_Host *shost;
10223 struct MPT3SAS_ADAPTER *ioc;
10224 struct workqueue_struct *wq;
10225 unsigned long flags;
10226 Mpi2ConfigReply_t mpi_reply;
10228 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
10231 ioc->remove_host = 1;
10233 if (!pci_device_is_present(pdev))
10234 _scsih_flush_running_cmds(ioc);
10236 _scsih_fw_event_cleanup_queue(ioc);
10238 spin_lock_irqsave(&ioc->fw_event_lock, flags);
10239 wq = ioc->firmware_event_thread;
10240 ioc->firmware_event_thread = NULL;
10241 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
10243 destroy_workqueue(wq);
10245 * Copy back the unmodified ioc page1 so that on next driver load,
10246 * current modified changes on ioc page1 won't take effect.
10248 if (ioc->is_aero_ioc)
10249 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
10250 &ioc->ioc_pg1_copy);
10252 _scsih_ir_shutdown(ioc);
10253 _scsih_nvme_shutdown(ioc);
10254 mpt3sas_base_detach(ioc);
10259 * _scsih_probe_boot_devices - reports 1st device
10260 * @ioc: per adapter object
10262 * If specified in bios page 2, this routine reports the 1st
10263 * device to scsi-ml or sas transport for persistent boot device
10264 * purposes. Please refer to function _scsih_determine_boot_device()
10267 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
10271 struct _sas_device *sas_device;
10272 struct _raid_device *raid_device;
10273 struct _pcie_device *pcie_device;
10275 u64 sas_address_parent;
10277 unsigned long flags;
10281 /* no Bios, return immediately */
10282 if (!ioc->bios_pg3.BiosVersion)
10286 if (ioc->req_boot_device.device) {
10287 device = ioc->req_boot_device.device;
10288 channel = ioc->req_boot_device.channel;
10289 } else if (ioc->req_alt_boot_device.device) {
10290 device = ioc->req_alt_boot_device.device;
10291 channel = ioc->req_alt_boot_device.channel;
10292 } else if (ioc->current_boot_device.device) {
10293 device = ioc->current_boot_device.device;
10294 channel = ioc->current_boot_device.channel;
10300 if (channel == RAID_CHANNEL) {
10301 raid_device = device;
10302 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10303 raid_device->id, 0);
10305 _scsih_raid_device_remove(ioc, raid_device);
10306 } else if (channel == PCIE_CHANNEL) {
10307 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10308 pcie_device = device;
10309 tid = pcie_device->id;
10310 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
10311 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10312 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
10314 _scsih_pcie_device_remove(ioc, pcie_device);
10316 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10317 sas_device = device;
10318 handle = sas_device->handle;
10319 sas_address_parent = sas_device->sas_address_parent;
10320 sas_address = sas_device->sas_address;
10321 list_move_tail(&sas_device->list, &ioc->sas_device_list);
10322 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10324 if (ioc->hide_drives)
10326 if (!mpt3sas_transport_port_add(ioc, handle,
10327 sas_address_parent)) {
10328 _scsih_sas_device_remove(ioc, sas_device);
10329 } else if (!sas_device->starget) {
10330 if (!ioc->is_driver_loading) {
10331 mpt3sas_transport_port_remove(ioc,
10333 sas_address_parent);
10334 _scsih_sas_device_remove(ioc, sas_device);
10341 * _scsih_probe_raid - reporting raid volumes to scsi-ml
10342 * @ioc: per adapter object
10344 * Called during initial loading of the driver.
10347 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
10349 struct _raid_device *raid_device, *raid_next;
10352 list_for_each_entry_safe(raid_device, raid_next,
10353 &ioc->raid_device_list, list) {
10354 if (raid_device->starget)
10356 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
10357 raid_device->id, 0);
10359 _scsih_raid_device_remove(ioc, raid_device);
10363 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
10365 struct _sas_device *sas_device = NULL;
10366 unsigned long flags;
10368 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10369 if (!list_empty(&ioc->sas_device_init_list)) {
10370 sas_device = list_first_entry(&ioc->sas_device_init_list,
10371 struct _sas_device, list);
10372 sas_device_get(sas_device);
10374 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
10379 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10380 struct _sas_device *sas_device)
10382 unsigned long flags;
10384 spin_lock_irqsave(&ioc->sas_device_lock, flags);
10387 * Since we dropped the lock during the call to port_add(), we need to
10388 * be careful here that somebody else didn't move or delete this item
10389 * while we were busy with other things.
10391 * If it was on the list, we need a put() for the reference the list
10392 * had. Either way, we need a get() for the destination list.
10394 if (!list_empty(&sas_device->list)) {
10395 list_del_init(&sas_device->list);
10396 sas_device_put(sas_device);
10399 sas_device_get(sas_device);
10400 list_add_tail(&sas_device->list, &ioc->sas_device_list);
10402 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
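/*
 * Illustrative sketch, not part of the driver: the same get/put discipline
 * used by sas_device_make_active() above, written against a hypothetical
 * kref-counted object.  Moving an entry from one list to another means
 * dropping the reference the old list held and taking one for the
 * destination list, all under the lock that protects both lists.
 */
struct example_node {
	struct list_head list;
	struct kref refcount;
};

static void example_node_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_node, refcount));
}

static void __maybe_unused example_make_active(spinlock_t *lock,
	struct example_node *node, struct list_head *active_list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(&node->list)) {
		/* drop the reference held by the list it was on */
		list_del_init(&node->list);
		kref_put(&node->refcount, example_node_release);
	}
	/* take a reference on behalf of the destination list */
	kref_get(&node->refcount);
	list_add_tail(&node->list, active_list);
	spin_unlock_irqrestore(lock, flags);
}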
10406 * _scsih_probe_sas - reporting sas devices to sas transport
10407 * @ioc: per adapter object
10409 * Called during initial loading of the driver.
10412 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
10414 struct _sas_device *sas_device;
10416 if (ioc->hide_drives)
10419 while ((sas_device = get_next_sas_device(ioc))) {
10420 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
10421 sas_device->sas_address_parent)) {
10422 _scsih_sas_device_remove(ioc, sas_device);
10423 sas_device_put(sas_device);
10425 } else if (!sas_device->starget) {
10427 * When async scanning is enabled, it's not possible to
10428 * remove devices while scanning is turned on due to an
10429 * oops in scsi_sysfs_add_sdev()->add_device()->
10430 * sysfs_addrm_start()
10432 if (!ioc->is_driver_loading) {
10433 mpt3sas_transport_port_remove(ioc,
10434 sas_device->sas_address,
10435 sas_device->sas_address_parent);
10436 _scsih_sas_device_remove(ioc, sas_device);
10437 sas_device_put(sas_device);
10441 sas_device_make_active(ioc, sas_device);
10442 sas_device_put(sas_device);
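/*
 * Note on the loop above: get_next_sas_device() returns each entry with an
 * extra reference held, so every path through the loop body (port_add
 * failure, deferred removal, or the normal sas_device_make_active() case)
 * must drop that reference with sas_device_put().
 */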
10447 * get_next_pcie_device - Get the next pcie device
10448 * @ioc: per adapter object
10450 * Get the next pcie device from pcie_device_init_list list.
10452 * Return: pcie device structure if pcie_device_init_list list is not empty
10453 * otherwise returns NULL
10455 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
10457 struct _pcie_device *pcie_device = NULL;
10458 unsigned long flags;
10460 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10461 if (!list_empty(&ioc->pcie_device_init_list)) {
10462 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
10463 struct _pcie_device, list);
10464 pcie_device_get(pcie_device);
10466 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10468 return pcie_device;
10472 * pcie_device_make_active - Add pcie device to pcie_device_list list
10473 * @ioc: per adapter object
10474 * @pcie_device: pcie device object
10476 * Add the pcie device which has registered with SCSI Transport Layer to
10477 * pcie_device_list list
10479 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
10480 struct _pcie_device *pcie_device)
10482 unsigned long flags;
10484 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
10486 if (!list_empty(&pcie_device->list)) {
10487 list_del_init(&pcie_device->list);
10488 pcie_device_put(pcie_device);
10490 pcie_device_get(pcie_device);
10491 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10493 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10497 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10498 * @ioc: per adapter object
10500 * Called during initial loading of the driver.
10503 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10505 struct _pcie_device *pcie_device;
10508 /* PCIe Device List */
10509 while ((pcie_device = get_next_pcie_device(ioc))) {
10510 if (pcie_device->starget) {
10511 pcie_device_put(pcie_device);
10514 if (pcie_device->access_status ==
10515 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
10516 pcie_device_make_active(ioc, pcie_device);
10517 pcie_device_put(pcie_device);
10520 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10521 pcie_device->id, 0);
10523 _scsih_pcie_device_remove(ioc, pcie_device);
10524 pcie_device_put(pcie_device);
10526 } else if (!pcie_device->starget) {
10528 * When async scanning is enabled, it's not possible to
10529 * remove devices while scanning is turned on due to an
10530 * oops in scsi_sysfs_add_sdev()->add_device()->
10531 * sysfs_addrm_start()
10533 if (!ioc->is_driver_loading) {
10534 /* TODO-- Need to find out whether this condition will
10537 _scsih_pcie_device_remove(ioc, pcie_device);
10538 pcie_device_put(pcie_device);
10542 pcie_device_make_active(ioc, pcie_device);
10543 pcie_device_put(pcie_device);
10548 * _scsih_probe_devices - probing for devices
10549 * @ioc: per adapter object
10551 * Called during initial loading of the driver.
10554 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10556 u16 volume_mapping_flags;
10558 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10559 return; /* return when IOC doesn't support initiator mode */
10561 _scsih_probe_boot_devices(ioc);
10563 if (ioc->ir_firmware) {
10564 volume_mapping_flags =
10565 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10566 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10567 if (volume_mapping_flags ==
10568 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10569 _scsih_probe_raid(ioc);
10570 _scsih_probe_sas(ioc);
10572 _scsih_probe_sas(ioc);
10573 _scsih_probe_raid(ioc);
10576 _scsih_probe_sas(ioc);
10577 _scsih_probe_pcie(ioc);
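/*
 * Note on the ordering above: with low volume mapping selected in IOC
 * page 8, RAID volumes are reported to scsi-ml before the bare SAS drives;
 * with the other mapping mode the order is reversed.  PCIe devices are
 * always probed last, and nothing is probed at all when the IOC does not
 * advertise SCSI initiator protocol support.
 */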
10582 * scsih_scan_start - scsi lld callback for .scan_start
10583 * @shost: SCSI host pointer
10585 * The shost has the ability to discover targets on its own instead
10586 * of scanning the entire bus. In our implementation, we will kick off
10587 * firmware discovery.
10590 scsih_scan_start(struct Scsi_Host *shost)
10592 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10594 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10595 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10596 else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
10597 mpt3sas_enable_diag_buffer(ioc, 1);
10599 if (disable_discovery > 0)
10602 ioc->start_scan = 1;
10603 rc = mpt3sas_port_enable(ioc);
10606 ioc_info(ioc, "port enable: FAILED\n");
10610 * scsih_scan_finished - scsi lld callback for .scan_finished
10611 * @shost: SCSI host pointer
10612 * @time: elapsed time of the scan in jiffies
10614 * This function will be called periodically until it returns 1 with the
10615 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10616 * we wait for firmware discovery to complete, then return 1.
10619 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10621 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10623 if (disable_discovery > 0) {
10624 ioc->is_driver_loading = 0;
10625 ioc->wait_for_discovery_to_complete = 0;
10629 if (time >= (300 * HZ)) {
10630 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10631 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10632 ioc->is_driver_loading = 0;
10636 if (ioc->start_scan)
10639 if (ioc->start_scan_failed) {
10640 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10641 ioc->start_scan_failed);
10642 ioc->is_driver_loading = 0;
10643 ioc->wait_for_discovery_to_complete = 0;
10644 ioc->remove_host = 1;
10648 ioc_info(ioc, "port enable: SUCCESS\n");
10649 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10651 if (ioc->wait_for_discovery_to_complete) {
10652 ioc->wait_for_discovery_to_complete = 0;
10653 _scsih_probe_devices(ioc);
10655 mpt3sas_base_start_watchdog(ioc);
10656 ioc->is_driver_loading = 0;
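/*
 * Illustrative sketch, not the real SCSI midlayer code: with asynchronous
 * scanning the midlayer calls ->scan_start() once and then polls
 * ->scan_finished() with the elapsed time in jiffies until it returns
 * non-zero, conceptually along these lines.
 */
static void __maybe_unused example_async_scan(struct Scsi_Host *shost)
{
	unsigned long start = jiffies;

	if (shost->hostt->scan_start)
		shost->hostt->scan_start(shost);

	while (!shost->hostt->scan_finished(shost, jiffies - start))
		msleep(100);
}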
10660 /* shost template for SAS 2.0 HBA devices */
10661 static struct scsi_host_template mpt2sas_driver_template = {
10662 .module = THIS_MODULE,
10663 .name = "Fusion MPT SAS Host",
10664 .proc_name = MPT2SAS_DRIVER_NAME,
10665 .queuecommand = scsih_qcmd,
10666 .target_alloc = scsih_target_alloc,
10667 .slave_alloc = scsih_slave_alloc,
10668 .slave_configure = scsih_slave_configure,
10669 .target_destroy = scsih_target_destroy,
10670 .slave_destroy = scsih_slave_destroy,
10671 .scan_finished = scsih_scan_finished,
10672 .scan_start = scsih_scan_start,
10673 .change_queue_depth = scsih_change_queue_depth,
10674 .eh_abort_handler = scsih_abort,
10675 .eh_device_reset_handler = scsih_dev_reset,
10676 .eh_target_reset_handler = scsih_target_reset,
10677 .eh_host_reset_handler = scsih_host_reset,
10678 .bios_param = scsih_bios_param,
10681 .sg_tablesize = MPT2SAS_SG_DEPTH,
10682 .max_sectors = 32767,
10684 .shost_attrs = mpt3sas_host_attrs,
10685 .sdev_attrs = mpt3sas_dev_attrs,
10686 .track_queue_depth = 1,
10687 .cmd_size = sizeof(struct scsiio_tracker),
10690 /* raid transport support for SAS 2.0 HBA devices */
10691 static struct raid_function_template mpt2sas_raid_functions = {
10692 .cookie = &mpt2sas_driver_template,
10693 .is_raid = scsih_is_raid,
10694 .get_resync = scsih_get_resync,
10695 .get_state = scsih_get_state,
10698 /* shost template for SAS 3.0 HBA devices */
10699 static struct scsi_host_template mpt3sas_driver_template = {
10700 .module = THIS_MODULE,
10701 .name = "Fusion MPT SAS Host",
10702 .proc_name = MPT3SAS_DRIVER_NAME,
10703 .queuecommand = scsih_qcmd,
10704 .target_alloc = scsih_target_alloc,
10705 .slave_alloc = scsih_slave_alloc,
10706 .slave_configure = scsih_slave_configure,
10707 .target_destroy = scsih_target_destroy,
10708 .slave_destroy = scsih_slave_destroy,
10709 .scan_finished = scsih_scan_finished,
10710 .scan_start = scsih_scan_start,
10711 .change_queue_depth = scsih_change_queue_depth,
10712 .eh_abort_handler = scsih_abort,
10713 .eh_device_reset_handler = scsih_dev_reset,
10714 .eh_target_reset_handler = scsih_target_reset,
10715 .eh_host_reset_handler = scsih_host_reset,
10716 .bios_param = scsih_bios_param,
10719 .sg_tablesize = MPT3SAS_SG_DEPTH,
10720 .max_sectors = 32767,
10721 .max_segment_size = 0xffffffff,
10723 .shost_attrs = mpt3sas_host_attrs,
10724 .sdev_attrs = mpt3sas_dev_attrs,
10725 .track_queue_depth = 1,
10726 .cmd_size = sizeof(struct scsiio_tracker),
10729 /* raid transport support for SAS 3.0 HBA devices */
10730 static struct raid_function_template mpt3sas_raid_functions = {
10731 .cookie = &mpt3sas_driver_template,
10732 .is_raid = scsih_is_raid,
10733 .get_resync = scsih_get_resync,
10734 .get_state = scsih_get_state,
10738 * _scsih_determine_hba_mpi_version - determine which MPI version class
10739 * this device belongs to.
10740 * @pdev: PCI device struct
10742 * return MPI2_VERSION for SAS 2.0 HBA devices,
10743 * MPI25_VERSION for SAS 3.0 HBA devices, and
10744 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
10747 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10750 switch (pdev->device) {
10751 case MPI2_MFGPAGE_DEVID_SSS6200:
10752 case MPI2_MFGPAGE_DEVID_SAS2004:
10753 case MPI2_MFGPAGE_DEVID_SAS2008:
10754 case MPI2_MFGPAGE_DEVID_SAS2108_1:
10755 case MPI2_MFGPAGE_DEVID_SAS2108_2:
10756 case MPI2_MFGPAGE_DEVID_SAS2108_3:
10757 case MPI2_MFGPAGE_DEVID_SAS2116_1:
10758 case MPI2_MFGPAGE_DEVID_SAS2116_2:
10759 case MPI2_MFGPAGE_DEVID_SAS2208_1:
10760 case MPI2_MFGPAGE_DEVID_SAS2208_2:
10761 case MPI2_MFGPAGE_DEVID_SAS2208_3:
10762 case MPI2_MFGPAGE_DEVID_SAS2208_4:
10763 case MPI2_MFGPAGE_DEVID_SAS2208_5:
10764 case MPI2_MFGPAGE_DEVID_SAS2208_6:
10765 case MPI2_MFGPAGE_DEVID_SAS2308_1:
10766 case MPI2_MFGPAGE_DEVID_SAS2308_2:
10767 case MPI2_MFGPAGE_DEVID_SAS2308_3:
10768 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10769 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10770 return MPI2_VERSION;
10771 case MPI25_MFGPAGE_DEVID_SAS3004:
10772 case MPI25_MFGPAGE_DEVID_SAS3008:
10773 case MPI25_MFGPAGE_DEVID_SAS3108_1:
10774 case MPI25_MFGPAGE_DEVID_SAS3108_2:
10775 case MPI25_MFGPAGE_DEVID_SAS3108_5:
10776 case MPI25_MFGPAGE_DEVID_SAS3108_6:
10777 return MPI25_VERSION;
10778 case MPI26_MFGPAGE_DEVID_SAS3216:
10779 case MPI26_MFGPAGE_DEVID_SAS3224:
10780 case MPI26_MFGPAGE_DEVID_SAS3316_1:
10781 case MPI26_MFGPAGE_DEVID_SAS3316_2:
10782 case MPI26_MFGPAGE_DEVID_SAS3316_3:
10783 case MPI26_MFGPAGE_DEVID_SAS3316_4:
10784 case MPI26_MFGPAGE_DEVID_SAS3324_1:
10785 case MPI26_MFGPAGE_DEVID_SAS3324_2:
10786 case MPI26_MFGPAGE_DEVID_SAS3324_3:
10787 case MPI26_MFGPAGE_DEVID_SAS3324_4:
10788 case MPI26_MFGPAGE_DEVID_SAS3508:
10789 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10790 case MPI26_MFGPAGE_DEVID_SAS3408:
10791 case MPI26_MFGPAGE_DEVID_SAS3516:
10792 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10793 case MPI26_MFGPAGE_DEVID_SAS3416:
10794 case MPI26_MFGPAGE_DEVID_SAS3616:
10795 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10796 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10797 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10798 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10799 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10800 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
10801 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
10802 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
10803 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
10804 return MPI26_VERSION;
10810 * _scsih_probe - attach and add scsi host
10811 * @pdev: PCI device struct
10812 * @id: pci device id
10814 * Return: 0 success, anything else error.
10817 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10819 struct MPT3SAS_ADAPTER *ioc;
10820 struct Scsi_Host *shost = NULL;
10822 u16 hba_mpi_version;
10824 /* Determine in which MPI version class this pci device belongs */
10825 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10826 if (hba_mpi_version == 0)
10829 /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
10830 * for other generation HBA's return with -ENODEV
10832 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
10835 /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
10836 * for other generation HBA's return with -ENODEV
10838 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
10839 || hba_mpi_version == MPI26_VERSION)))
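/*
 * hbas_to_enumerate is a module parameter: 0 binds the driver to both
 * generations, 1 restricts it to SAS 2.0 HBAs, and 2 restricts it to
 * SAS 3.0/3.5 HBAs, which is what the two checks above implement.
 */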
10842 switch (hba_mpi_version) {
10844 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10845 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10846 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
10847 shost = scsi_host_alloc(&mpt2sas_driver_template,
10848 sizeof(struct MPT3SAS_ADAPTER));
10851 ioc = shost_priv(shost);
10852 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10853 ioc->hba_mpi_version_belonged = hba_mpi_version;
10854 ioc->id = mpt2_ids++;
10855 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10856 switch (pdev->device) {
10857 case MPI2_MFGPAGE_DEVID_SSS6200:
10858 ioc->is_warpdrive = 1;
10859 ioc->hide_ir_msg = 1;
10861 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10862 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10863 ioc->is_mcpu_endpoint = 1;
10866 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10870 case MPI25_VERSION:
10871 case MPI26_VERSION:
10872 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
10873 shost = scsi_host_alloc(&mpt3sas_driver_template,
10874 sizeof(struct MPT3SAS_ADAPTER));
10877 ioc = shost_priv(shost);
10878 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10879 ioc->hba_mpi_version_belonged = hba_mpi_version;
10880 ioc->id = mpt3_ids++;
10881 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10882 switch (pdev->device) {
10883 case MPI26_MFGPAGE_DEVID_SAS3508:
10884 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10885 case MPI26_MFGPAGE_DEVID_SAS3408:
10886 case MPI26_MFGPAGE_DEVID_SAS3516:
10887 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10888 case MPI26_MFGPAGE_DEVID_SAS3416:
10889 case MPI26_MFGPAGE_DEVID_SAS3616:
10890 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10891 ioc->is_gen35_ioc = 1;
10893 case MPI26_MFGPAGE_DEVID_INVALID0_3816:
10894 case MPI26_MFGPAGE_DEVID_INVALID0_3916:
10895 dev_err(&pdev->dev,
10896 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
10897 pdev->device, pdev->subsystem_vendor,
10898 pdev->subsystem_device);
10900 case MPI26_MFGPAGE_DEVID_INVALID1_3816:
10901 case MPI26_MFGPAGE_DEVID_INVALID1_3916:
10902 dev_err(&pdev->dev,
10903 "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
10904 pdev->device, pdev->subsystem_vendor,
10905 pdev->subsystem_device);
10907 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10908 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10909 dev_info(&pdev->dev,
10910 "HBA is in Configurable Secure mode\n");
10912 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10913 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10914 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10917 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10919 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10920 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10921 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10922 ioc->combined_reply_queue = 1;
10923 if (ioc->is_gen35_ioc)
10924 ioc->combined_reply_index_count =
10925 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10927 ioc->combined_reply_index_count =
10928 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10935 INIT_LIST_HEAD(&ioc->list);
10936 spin_lock(&gioc_lock);
10937 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10938 spin_unlock(&gioc_lock);
10939 ioc->shost = shost;
10941 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10942 ioc->tm_cb_idx = tm_cb_idx;
10943 ioc->ctl_cb_idx = ctl_cb_idx;
10944 ioc->base_cb_idx = base_cb_idx;
10945 ioc->port_enable_cb_idx = port_enable_cb_idx;
10946 ioc->transport_cb_idx = transport_cb_idx;
10947 ioc->scsih_cb_idx = scsih_cb_idx;
10948 ioc->config_cb_idx = config_cb_idx;
10949 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10950 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10951 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10952 ioc->logging_level = logging_level;
10953 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10954 /* Host waits for minimum of six seconds */
10955 ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
10957 * Enable MEMORY MOVE support flag.
10959 ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
10961 ioc->enable_sdev_max_qd = enable_sdev_max_qd;
10963 /* misc semaphores and spin locks */
10964 mutex_init(&ioc->reset_in_progress_mutex);
10965 /* initializing pci_access_mutex lock */
10966 mutex_init(&ioc->pci_access_mutex);
10967 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10968 spin_lock_init(&ioc->scsi_lookup_lock);
10969 spin_lock_init(&ioc->sas_device_lock);
10970 spin_lock_init(&ioc->sas_node_lock);
10971 spin_lock_init(&ioc->fw_event_lock);
10972 spin_lock_init(&ioc->raid_device_lock);
10973 spin_lock_init(&ioc->pcie_device_lock);
10974 spin_lock_init(&ioc->diag_trigger_lock);
10976 INIT_LIST_HEAD(&ioc->sas_device_list);
10977 INIT_LIST_HEAD(&ioc->sas_device_init_list);
10978 INIT_LIST_HEAD(&ioc->sas_expander_list);
10979 INIT_LIST_HEAD(&ioc->enclosure_list);
10980 INIT_LIST_HEAD(&ioc->pcie_device_list);
10981 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10982 INIT_LIST_HEAD(&ioc->fw_event_list);
10983 INIT_LIST_HEAD(&ioc->raid_device_list);
10984 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10985 INIT_LIST_HEAD(&ioc->delayed_tr_list);
10986 INIT_LIST_HEAD(&ioc->delayed_sc_list);
10987 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10988 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10989 INIT_LIST_HEAD(&ioc->reply_queue_list);
10991 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10993 /* init shost parameters */
10994 shost->max_cmd_len = 32;
10995 shost->max_lun = max_lun;
10996 shost->transportt = mpt3sas_transport_template;
10997 shost->unique_id = ioc->id;
10999 if (ioc->is_mcpu_endpoint) {
11000 /* mCPU MPI supports 64K max IO */
11001 shost->max_sectors = 128;
11002 ioc_info(ioc, "The max_sectors value is set to %d\n",
11003 shost->max_sectors);
11005 if (max_sectors != 0xFFFF) {
11006 if (max_sectors < 64) {
11007 shost->max_sectors = 64;
11008 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
11010 } else if (max_sectors > 32767) {
11011 shost->max_sectors = 32767;
11012 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
11015 shost->max_sectors = max_sectors & 0xFFFE;
11016 ioc_info(ioc, "The max_sectors value is set to %d\n",
11017 shost->max_sectors);
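/*
 * The accepted module-parameter value is masked with 0xFFFE above, so the
 * limit programmed into shost->max_sectors is always rounded down to an
 * even number of sectors.
 */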
11021 /* register EEDP capabilities with SCSI layer */
11022 if (prot_mask >= 0)
11023 scsi_host_set_prot(shost, (prot_mask & 0x07));
11025 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
11026 | SHOST_DIF_TYPE2_PROTECTION
11027 | SHOST_DIF_TYPE3_PROTECTION);
11029 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
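/*
 * prot_mask is a bitwise OR of the SHOST_DIF_TYPE{1,2,3}_PROTECTION flags
 * (1, 2 and 4 respectively), so masking with 0x07 keeps only those DIF
 * bits; a negative prot_mask advertises all three DIF types.  CRC guard
 * tags are enabled in either case via scsi_host_set_guard().
 */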
11032 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
11033 "fw_event_%s%d", ioc->driver_name, ioc->id);
11034 ioc->firmware_event_thread = alloc_ordered_workqueue(
11035 ioc->firmware_event_name, 0);
11036 if (!ioc->firmware_event_thread) {
11037 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11038 __FILE__, __LINE__, __func__);
11040 goto out_thread_fail;
11043 ioc->is_driver_loading = 1;
11044 if ((mpt3sas_base_attach(ioc))) {
11045 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11046 __FILE__, __LINE__, __func__);
11048 goto out_attach_fail;
11051 if (ioc->is_warpdrive) {
11052 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
11053 ioc->hide_drives = 0;
11054 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
11055 ioc->hide_drives = 1;
11057 if (mpt3sas_get_num_volumes(ioc))
11058 ioc->hide_drives = 1;
11060 ioc->hide_drives = 0;
11063 ioc->hide_drives = 0;
11065 rv = scsi_add_host(shost, &pdev->dev);
11067 ioc_err(ioc, "failure at %s:%d/%s()!\n",
11068 __FILE__, __LINE__, __func__);
11069 goto out_add_shost_fail;
11072 scsi_scan_host(shost);
11073 mpt3sas_setup_debugfs(ioc);
11075 out_add_shost_fail:
11076 mpt3sas_base_detach(ioc);
11078 destroy_workqueue(ioc->firmware_event_thread);
11080 spin_lock(&gioc_lock);
11081 list_del(&ioc->list);
11082 spin_unlock(&gioc_lock);
11083 scsi_host_put(shost);
11089 * scsih_suspend - power management suspend main entry point
11090 * @pdev: PCI device struct
11091 * @state: PM state change to (usually PCI_D3)
11093 * Return: 0 success, anything else error.
11096 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
11098 struct Scsi_Host *shost;
11099 struct MPT3SAS_ADAPTER *ioc;
11100 pci_power_t device_state;
11103 rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
11107 mpt3sas_base_stop_watchdog(ioc);
11108 flush_scheduled_work();
11109 scsi_block_requests(shost);
11110 _scsih_nvme_shutdown(ioc);
11111 device_state = pci_choose_state(pdev, state);
11112 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
11113 pdev, pci_name(pdev), device_state);
11115 pci_save_state(pdev);
11116 mpt3sas_base_free_resources(ioc);
11117 pci_set_power_state(pdev, device_state);
11122 * scsih_resume - power management resume main entry point
11123 * @pdev: PCI device struct
11125 * Return: 0 success, anything else error.
11128 scsih_resume(struct pci_dev *pdev)
11130 struct Scsi_Host *shost;
11131 struct MPT3SAS_ADAPTER *ioc;
11132 pci_power_t device_state = pdev->current_state;
11135 r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
11139 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
11140 pdev, pci_name(pdev), device_state);
11142 pci_set_power_state(pdev, PCI_D0);
11143 pci_enable_wake(pdev, PCI_D0, 0);
11144 pci_restore_state(pdev);
11146 r = mpt3sas_base_map_resources(ioc);
11149 ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
11150 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
11151 scsi_unblock_requests(shost);
11152 mpt3sas_base_start_watchdog(ioc);
11155 #endif /* CONFIG_PM */
11158 * scsih_pci_error_detected - Called when a PCI error is detected.
11159 * @pdev: PCI device struct
11160 * @state: PCI channel state
11162 * Description: Called when a PCI error is detected.
11164 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
11166 static pci_ers_result_t
11167 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
11169 struct Scsi_Host *shost;
11170 struct MPT3SAS_ADAPTER *ioc;
11172 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11173 return PCI_ERS_RESULT_DISCONNECT;
11175 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
11178 case pci_channel_io_normal:
11179 return PCI_ERS_RESULT_CAN_RECOVER;
11180 case pci_channel_io_frozen:
11181 /* Fatal error, prepare for slot reset */
11182 ioc->pci_error_recovery = 1;
11183 scsi_block_requests(ioc->shost);
11184 mpt3sas_base_stop_watchdog(ioc);
11185 mpt3sas_base_free_resources(ioc);
11186 return PCI_ERS_RESULT_NEED_RESET;
11187 case pci_channel_io_perm_failure:
11188 /* Permanent error, prepare for device removal */
11189 ioc->pci_error_recovery = 1;
11190 mpt3sas_base_stop_watchdog(ioc);
11191 _scsih_flush_running_cmds(ioc);
11192 return PCI_ERS_RESULT_DISCONNECT;
11194 return PCI_ERS_RESULT_NEED_RESET;
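/*
 * Recovery flow implemented above: a normal channel state returns
 * CAN_RECOVER (the PCI core then invokes ->mmio_enabled()), a frozen
 * channel returns NEED_RESET (the core invokes ->slot_reset() and, on
 * success, ->resume()), and a permanent failure returns DISCONNECT so the
 * device gets torn down.
 */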
11198 * scsih_pci_slot_reset - Called when PCI slot has been reset.
11199 * @pdev: PCI device struct
11201 * Description: This routine is called by the pci error recovery
11202 * code after the PCI slot has been reset, just before we
11203 * should resume normal operations.
11205 static pci_ers_result_t
11206 scsih_pci_slot_reset(struct pci_dev *pdev)
11208 struct Scsi_Host *shost;
11209 struct MPT3SAS_ADAPTER *ioc;
11212 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11213 return PCI_ERS_RESULT_DISCONNECT;
11215 ioc_info(ioc, "PCI error: slot reset callback!!\n");
11217 ioc->pci_error_recovery = 0;
11219 pci_restore_state(pdev);
11220 rc = mpt3sas_base_map_resources(ioc);
11222 return PCI_ERS_RESULT_DISCONNECT;
11224 ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
11225 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
11227 ioc_warn(ioc, "hard reset: %s\n",
11228 (rc == 0) ? "success" : "failed");
11231 return PCI_ERS_RESULT_RECOVERED;
11233 return PCI_ERS_RESULT_DISCONNECT;
11237 * scsih_pci_resume() - resume normal ops after PCI reset
11238 * @pdev: pointer to PCI device
11240 * Called when the error recovery driver tells us that it's
11241 * OK to resume normal operation. Use completion to allow
11242 * halted scsi ops to resume.
11245 scsih_pci_resume(struct pci_dev *pdev)
11247 struct Scsi_Host *shost;
11248 struct MPT3SAS_ADAPTER *ioc;
11250 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11253 ioc_info(ioc, "PCI error: resume callback!!\n");
11255 mpt3sas_base_start_watchdog(ioc);
11256 scsi_unblock_requests(ioc->shost);
11260 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
11261 * @pdev: pointer to PCI device
11263 static pci_ers_result_t
11264 scsih_pci_mmio_enabled(struct pci_dev *pdev)
11266 struct Scsi_Host *shost;
11267 struct MPT3SAS_ADAPTER *ioc;
11269 if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11270 return PCI_ERS_RESULT_DISCONNECT;
11272 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
11274 /* TODO - dump whatever for debugging purposes */
11276 /* This is called only if scsih_pci_error_detected returns
11277 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
11278 * works, no need to reset slot.
11280 return PCI_ERS_RESULT_RECOVERED;
11284 * scsih_ncq_prio_supp - Check for NCQ command priority support
11285 * @sdev: scsi device struct
11287 * This is called when a user indicates they would like to enable
11288 * ncq command priorities. This works only on SATA devices.
11290 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
11292 unsigned char *buf;
11293 bool ncq_prio_supp = false;
11295 if (!scsi_device_supports_vpd(sdev))
11296 return ncq_prio_supp;
11298 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
11300 return ncq_prio_supp;
11302 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
11303 ncq_prio_supp = (buf[213] >> 4) & 1;
11306 return ncq_prio_supp;
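/*
 * Illustrative sketch with a hypothetical helper, not part of the driver:
 * a caller that wants to honour a request to enable NCQ priority would
 * gate it on scsih_ncq_prio_supp().  The check above reads the ATA
 * Information VPD page (0x89) and tests byte 213 bit 4, which is set only
 * for SATA devices whose SATL reports NCQ priority support.
 */
static bool __maybe_unused example_set_ncq_prio(struct scsi_device *sdev,
	bool enable)
{
	if (enable && !scsih_ncq_prio_supp(sdev))
		return false;	/* not SATA, or no NCQ priority support */
	/* the real driver would record the setting in its per-device data */
	return true;
}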
11309 * The pci device ids are defined in mpi/mpi2_cnfg.h.
11311 static const struct pci_device_id mpt3sas_pci_table[] = {
11312 /* Spitfire ~ 2004 */
11313 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
11314 PCI_ANY_ID, PCI_ANY_ID },
11315 /* Falcon ~ 2008 */
11316 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
11317 PCI_ANY_ID, PCI_ANY_ID },
11318 /* Liberator ~ 2108 */
11319 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
11320 PCI_ANY_ID, PCI_ANY_ID },
11321 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
11322 PCI_ANY_ID, PCI_ANY_ID },
11323 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
11324 PCI_ANY_ID, PCI_ANY_ID },
11325 /* Meteor ~ 2116 */
11326 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
11327 PCI_ANY_ID, PCI_ANY_ID },
11328 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
11329 PCI_ANY_ID, PCI_ANY_ID },
11330 /* Thunderbolt ~ 2208 */
11331 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
11332 PCI_ANY_ID, PCI_ANY_ID },
11333 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
11334 PCI_ANY_ID, PCI_ANY_ID },
11335 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
11336 PCI_ANY_ID, PCI_ANY_ID },
11337 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
11338 PCI_ANY_ID, PCI_ANY_ID },
11339 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
11340 PCI_ANY_ID, PCI_ANY_ID },
11341 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
11342 PCI_ANY_ID, PCI_ANY_ID },
11343 /* Mustang ~ 2308 */
11344 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
11345 PCI_ANY_ID, PCI_ANY_ID },
11346 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
11347 PCI_ANY_ID, PCI_ANY_ID },
11348 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
11349 PCI_ANY_ID, PCI_ANY_ID },
11350 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
11351 PCI_ANY_ID, PCI_ANY_ID },
11352 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
11353 PCI_ANY_ID, PCI_ANY_ID },
11355 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
11356 PCI_ANY_ID, PCI_ANY_ID },
11357 /* Fury ~ 3004 and 3008 */
11358 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
11359 PCI_ANY_ID, PCI_ANY_ID },
11360 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
11361 PCI_ANY_ID, PCI_ANY_ID },
11362 /* Invader ~ 3108 */
11363 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
11364 PCI_ANY_ID, PCI_ANY_ID },
11365 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
11366 PCI_ANY_ID, PCI_ANY_ID },
11367 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
11368 PCI_ANY_ID, PCI_ANY_ID },
11369 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
11370 PCI_ANY_ID, PCI_ANY_ID },
11371 /* Cutlass ~ 3216 and 3224 */
11372 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
11373 PCI_ANY_ID, PCI_ANY_ID },
11374 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
11375 PCI_ANY_ID, PCI_ANY_ID },
11376 /* Intruder ~ 3316 and 3324 */
11377 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
11378 PCI_ANY_ID, PCI_ANY_ID },
11379 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
11380 PCI_ANY_ID, PCI_ANY_ID },
11381 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
11382 PCI_ANY_ID, PCI_ANY_ID },
11383 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
11384 PCI_ANY_ID, PCI_ANY_ID },
11385 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
11386 PCI_ANY_ID, PCI_ANY_ID },
11387 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
11388 PCI_ANY_ID, PCI_ANY_ID },
11389 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
11390 PCI_ANY_ID, PCI_ANY_ID },
11391 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
11392 PCI_ANY_ID, PCI_ANY_ID },
11393 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
11394 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
11395 PCI_ANY_ID, PCI_ANY_ID },
11396 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
11397 PCI_ANY_ID, PCI_ANY_ID },
11398 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
11399 PCI_ANY_ID, PCI_ANY_ID },
11400 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
11401 PCI_ANY_ID, PCI_ANY_ID },
11402 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
11403 PCI_ANY_ID, PCI_ANY_ID },
11404 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
11405 PCI_ANY_ID, PCI_ANY_ID },
11406 /* Mercator ~ 3616*/
11407 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
11408 PCI_ANY_ID, PCI_ANY_ID },
11410 /* Aero SI 0x00E1 Configurable Secure
11411 * 0x00E2 Hard Secure
11413 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
11414 PCI_ANY_ID, PCI_ANY_ID },
11415 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
11416 PCI_ANY_ID, PCI_ANY_ID },
11419 * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
11421 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
11422 PCI_ANY_ID, PCI_ANY_ID },
11423 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
11424 PCI_ANY_ID, PCI_ANY_ID },
11426 /* Atlas PCIe Switch Management Port */
11427 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
11428 PCI_ANY_ID, PCI_ANY_ID },
11430 /* Sea SI 0x00E5 Configurable Secure
11431 * 0x00E6 Hard Secure
11433 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
11434 PCI_ANY_ID, PCI_ANY_ID },
11435 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
11436 PCI_ANY_ID, PCI_ANY_ID },
11439 * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
11441 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
11442 PCI_ANY_ID, PCI_ANY_ID },
11443 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
11444 PCI_ANY_ID, PCI_ANY_ID },
11446 {0} /* Terminating entry */
11448 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
11450 static struct pci_error_handlers _mpt3sas_err_handler = {
11451 .error_detected = scsih_pci_error_detected,
11452 .mmio_enabled = scsih_pci_mmio_enabled,
11453 .slot_reset = scsih_pci_slot_reset,
11454 .resume = scsih_pci_resume,
11457 static struct pci_driver mpt3sas_driver = {
11458 .name = MPT3SAS_DRIVER_NAME,
11459 .id_table = mpt3sas_pci_table,
11460 .probe = _scsih_probe,
11461 .remove = scsih_remove,
11462 .shutdown = scsih_shutdown,
11463 .err_handler = &_mpt3sas_err_handler,
11465 .suspend = scsih_suspend,
11466 .resume = scsih_resume,
11471 * scsih_init - main entry point for this driver.
11473 * Return: 0 success, anything else error.
11481 mpt3sas_base_initialize_callback_handler();
11483 /* queuecommand callback handler */
11484 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
11486 /* task management callback handler */
11487 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
11489 /* base internal commands callback handler */
11490 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
11491 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
11492 mpt3sas_port_enable_done);
11494 /* transport internal commands callback handler */
11495 transport_cb_idx = mpt3sas_base_register_callback_handler(
11496 mpt3sas_transport_done);
11498 /* scsih internal commands callback handler */
11499 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
11501 /* configuration page API internal commands callback handler */
11502 config_cb_idx = mpt3sas_base_register_callback_handler(
11503 mpt3sas_config_done);
11505 /* ctl module callback handler */
11506 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
11508 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
11509 _scsih_tm_tr_complete);
11511 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
11512 _scsih_tm_volume_tr_complete);
11514 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
11515 _scsih_sas_control_complete);
11517 mpt3sas_init_debugfs();
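/*
 * Note on the registrations above: mpt3sas_base_register_callback_handler()
 * returns a callback index for each handler; that index is associated with
 * every request issued for the corresponding purpose, so reply processing
 * can dispatch the completion to the matching *_done() routine.
 * scsih_exit() releases the indices again with
 * mpt3sas_base_release_callback_handler().
 */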
11522 * scsih_exit - exit point for this driver (when it is a module).
11524 * Return: 0 success, anything else error.
11530 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
11531 mpt3sas_base_release_callback_handler(tm_cb_idx);
11532 mpt3sas_base_release_callback_handler(base_cb_idx);
11533 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
11534 mpt3sas_base_release_callback_handler(transport_cb_idx);
11535 mpt3sas_base_release_callback_handler(scsih_cb_idx);
11536 mpt3sas_base_release_callback_handler(config_cb_idx);
11537 mpt3sas_base_release_callback_handler(ctl_cb_idx);
11539 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
11540 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
11541 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
11543 /* raid transport support */
11544 if (hbas_to_enumerate != 1)
11545 raid_class_release(mpt3sas_raid_template);
11546 if (hbas_to_enumerate != 2)
11547 raid_class_release(mpt2sas_raid_template);
11548 sas_release_transport(mpt3sas_transport_template);
11549 mpt3sas_exit_debugfs();
11553 * _mpt3sas_init - main entry point for this driver.
11555 * Return: 0 success, anything else error.
11558 _mpt3sas_init(void)
11562 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
11563 MPT3SAS_DRIVER_VERSION);
11565 mpt3sas_transport_template =
11566 sas_attach_transport(&mpt3sas_transport_functions);
11567 if (!mpt3sas_transport_template)
11570 /* No need to attach mpt3sas raid functions template
11571 * if hbas_to_enumerate value is one.
11573 if (hbas_to_enumerate != 1) {
11574 mpt3sas_raid_template =
11575 raid_class_attach(&mpt3sas_raid_functions);
11576 if (!mpt3sas_raid_template) {
11577 sas_release_transport(mpt3sas_transport_template);
11582 /* No need to attach mpt2sas raid functions template
11583 * if hbas_to_enumerate value is two
11585 if (hbas_to_enumerate != 2) {
11586 mpt2sas_raid_template =
11587 raid_class_attach(&mpt2sas_raid_functions);
11588 if (!mpt2sas_raid_template) {
11589 sas_release_transport(mpt3sas_transport_template);
11594 error = scsih_init();
11600 mpt3sas_ctl_init(hbas_to_enumerate);
11602 error = pci_register_driver(&mpt3sas_driver);
11610 * _mpt3sas_exit - exit point for this driver (when it is a module).
11614 _mpt3sas_exit(void)
11616 pr_info("mpt3sas version %s unloading\n",
11617 MPT3SAS_DRIVER_VERSION);
11619 mpt3sas_ctl_exit(hbas_to_enumerate);
11621 pci_unregister_driver(&mpt3sas_driver);
11626 module_init(_mpt3sas_init);
11627 module_exit(_mpt3sas_exit);