/*
 * Serial Attached SCSI (SAS) class SCSI Host glue.
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 */
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>

#include "sas_internal.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"

#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
	struct task_status_struct *ts = &task->task_status;

	if (ts->resp == SAS_TASK_UNDELIVERED) {
	} else { /* ts->resp == SAS_TASK_COMPLETE */
		/* task delivered, what happened afterwards? */
		case SAS_DEV_NO_RESPONSE:
		case SAS_DATA_UNDERRUN:
			scsi_set_resid(sc, ts->residual);
			if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
		case SAS_DATA_OVERRUN:
			hs = DID_SOFT_ERROR; /* retry */
		case SAS_DEVICE_UNKNOWN:
			if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
				hs = DID_SOFT_ERROR; /* retry */
		case SAS_PROTO_RESPONSE:
			SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
				    "task; please report this\n",
				    task->dev->port->ha->sas_ha_name);
		case SAS_ABORTED_TASK:
		case SAM_STAT_CHECK_CONDITION:
			memcpy(sc->sense_buffer, ts->buf,
			       min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
			stat = SAM_STAT_CHECK_CONDITION;

	sc->result = (hs << 16) | stat;
	ASSIGN_SAS_TASK(sc, NULL);
	list_del_init(&task->list);
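/*
 * Completion callback attached to each sas_task built for a SCSI command.
 * Under dev->done_lock it checks SAS_HA_FROZEN: while the host is frozen
 * for error handling the task is left for the EH thread to reap, otherwise
 * the command is detached from the task and completed via sas_end_task().
 */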
static void sas_scsi_task_done(struct sas_task *task)
	struct scsi_cmnd *sc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct sas_ha_struct *ha = dev->port->ha;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &ha->state))
		ASSIGN_SAS_TASK(sc, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	if (unlikely(!task)) {
		/* task will be completed by the error handler */
		SAS_DPRINTK("task done but aborted\n");
		SAS_DPRINTK("task_done called with nonexistent SCSI cmnd!\n");
		list_del_init(&task->list);

	sas_end_task(sc, task);
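/*
 * Build a sas_task for an SSP command: copy the LUN, set a simple task
 * attribute, attach the command's scatterlist, transfer length and data
 * direction, and wire up sas_scsi_task_done() as the completion callback.
 */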
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
	struct sas_task *task = sas_alloc_task(gfp_flags);

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	task->ssp_task.cmd = cmd;

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;

	task->task_done = sas_scsi_task_done;
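/*
 * Task Collector Mode: park the task on the scsi_core task queue and wake
 * the collector thread.  Returns -SAS_QUEUE_FULL if the LLDD queue depth
 * would be exceeded.
 */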
int sas_queue_up(struct sas_task *task)
	struct sas_ha_struct *sas_ha = task->dev->port->ha;
	struct scsi_core *core = &sas_ha->core;

	spin_lock_irqsave(&core->task_queue_lock, flags);
	if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		return -SAS_QUEUE_FULL;
	list_add_tail(&task->list, &core->task_queue);
	core->task_queue_size += 1;
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
	wake_up_process(core->queue_thread);
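/*
 * queuecommand entry point.  SATA/STP commands are handed to libata via
 * ata_sas_queuecmd(); SSP commands get a sas_task which is either passed
 * straight to the LLDD (Direct Mode) or queued for the collector thread
 * (Task Collector Mode), depending on lldd_max_execute_num.
 */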
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
	struct sas_internal *i = to_sas_internal(host->transportt);
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct sas_task *task;

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state)) {
		cmd->result = DID_BAD_TARGET << 16;

	if (dev_is_sata(dev)) {
		spin_lock_irq(dev->sata_dev.ap->lock);
		res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
		spin_unlock_irq(dev->sata_dev.ap->lock);

	task = sas_create_task(cmd, dev, GFP_ATOMIC);
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Queue up, Direct Mode or Task Collector Mode. */
	if (sas_ha->lldd_max_execute_num < 2)
		res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
		res = sas_queue_up(task);

	SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
	ASSIGN_SAS_TASK(cmd, NULL);

	if (res == -SAS_QUEUE_FULL)
		cmd->result = DID_SOFT_ERROR << 16; /* retry */
		cmd->result = DID_ERROR << 16;
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
	struct sas_task *task = TO_SAS_TASK(cmd);

	/* At this point, we only get called following an actual abort
	 * of the task, so we should be guaranteed not to be racing with
	 * any completions from the LLD. Task is freed after this.
	 */
	sas_end_task(cmd, task);

	/* now finish the command and move it on to the error
	 * handler done list, this also takes it off the
	 * error handler pending list.
	 */
	scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
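/*
 * Defer EH completion of an ATA command: end the sas_task now, but park
 * the command on ha->eh_ata_q so libata error handling can finish it.
 * Non-ATA commands are simply finished.
 */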
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_ha_struct *ha = dev->port->ha;
	struct sas_task *task = TO_SAS_TASK(cmd);

	if (!dev_is_sata(dev)) {
		sas_eh_finish_cmd(cmd);

	/* report the timeout to libata */
	sas_end_task(cmd, task);
	list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
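/*
 * The sas_scsi_clear_queue_{lu,I_T,port} helpers below sweep the EH work
 * queue and complete every command addressed to the same LUN, device or
 * port once a wider-scope recovery action has already succeeded for it.
 */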
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
		    cmd->device->lun == my_cmd->device->lun)
			sas_eh_defer_cmd(cmd);
static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
				     struct domain_device *dev)
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *x = cmd_to_domain_dev(cmd);

			sas_eh_finish_cmd(cmd);

static void sas_scsi_clear_queue_port(struct list_head *error_q,
				      struct asd_sas_port *port)
	struct scsi_cmnd *cmd, *n;

	list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct asd_sas_port *x = dev->port;

			sas_eh_finish_cmd(cmd);
enum task_disposition {
	TASK_IS_DONE,
	TASK_IS_ABORTED,
	TASK_IS_AT_LU,
	TASK_IS_NOT_AT_LU,
	TASK_IS_NOT_AT_HA,
	TASK_ABORT_FAILED,
};
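/*
 * Ask the LLDD where a timed-out task is: repeatedly request an abort and,
 * if the LLDD supports it, query the task to decide whether it is done,
 * aborted, still at the LU, or unreachable.
 */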
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
	struct sas_ha_struct *ha = task->dev->port->ha;
	struct sas_internal *si =
		to_sas_internal(task->dev->port->ha->core.shost->transportt);

	if (ha->lldd_max_execute_num > 1) {
		struct scsi_core *core = &ha->core;
		struct sas_task *t, *n;

		mutex_lock(&core->task_queue_flush);
		spin_lock_irqsave(&core->task_queue_lock, flags);
		list_for_each_entry_safe(t, n, &core->task_queue, list)
				list_del_init(&t->list);
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
		mutex_unlock(&core->task_queue_flush);

			return TASK_IS_NOT_AT_HA;

	for (i = 0; i < 5; i++) {
		SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
		res = si->dft->lldd_abort_task(task);

		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_STATE_DONE) {
			spin_unlock_irqrestore(&task->task_state_lock, flags);
			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		if (res == TMF_RESP_FUNC_COMPLETE) {
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
			return TASK_IS_ABORTED;
		} else if (si->dft->lldd_query_task) {
			SAS_DPRINTK("%s: querying task 0x%p\n",
			res = si->dft->lldd_query_task(task);
			case TMF_RESP_FUNC_SUCC:
				SAS_DPRINTK("%s: task 0x%p at LU\n",
				return TASK_IS_AT_LU;
			case TMF_RESP_FUNC_COMPLETE:
				SAS_DPRINTK("%s: task 0x%p not at LU\n",
				return TASK_IS_NOT_AT_LU;
			case TMF_RESP_FUNC_FAILED:
				SAS_DPRINTK("%s: task 0x%p failed to abort\n",
				return TASK_ABORT_FAILED;
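/*
 * LU recovery escalation: try ABORT TASK SET, then CLEAR TASK SET, then a
 * LU reset, stopping as soon as one of them succeeds.
 */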
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	int_to_scsilun(cmd->device->lun, &lun);

	SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
		    SAS_ADDR(dev->sas_addr),

	if (i->dft->lldd_abort_task_set)
		res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_clear_task_set)
			res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);

	if (res == TMF_RESP_FUNC_FAILED) {
		if (i->dft->lldd_lu_reset)
			res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
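/* Recover the I_T nexus by asking the LLDD for an I_T nexus reset. */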
static int sas_recover_I_T(struct domain_device *dev)
	int res = TMF_RESP_FUNC_FAILED;
	struct sas_internal *i =
		to_sas_internal(dev->port->ha->core.shost->transportt);

	SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
		    SAS_ADDR(dev->sas_addr));

	if (i->dft->lldd_I_T_nexus_reset)
		res = i->dft->lldd_I_T_nexus_reset(dev);
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
	struct sas_ha_struct *ha = dev->port->ha;

	/* a published domain device always has a valid phy, it may be
	 * stale, but it is never NULL
	 */
	spin_lock_irqsave(&ha->phy_port_lock, flags);
	get_device(&phy->dev);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);

EXPORT_SYMBOL_GPL(sas_get_local_phy);
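/*
 * Wait for any pending error handling on @dev to finish.  ATA devices are
 * handed to ata_port_wait_eh(); for SSP devices we sleep on ha->eh_wait_q
 * until SAS_DEV_EH_PENDING clears, then wait for SCSI EH itself.
 */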
static void sas_wait_eh(struct domain_device *dev)
	struct sas_ha_struct *ha = dev->port->ha;

	if (dev_is_sata(dev)) {
		ata_port_wait_eh(dev->sata_dev.ap);

	spin_lock_irq(&ha->lock);

	while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
		prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ha->lock);
		spin_lock_irq(&ha->lock);
	finish_wait(&ha->eh_wait_q, &wait);

	spin_unlock_irq(&ha->lock);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ha->core.shost)) {

EXPORT_SYMBOL(sas_wait_eh);
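/*
 * Queue a LU or I_T nexus reset on ha->eh_dev_q for the EH thread to
 * execute.  For ATA devices the reset is promoted to a libata port reset.
 */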
static int sas_queue_reset(struct domain_device *dev, int reset_type,
	struct sas_ha_struct *ha = dev->port->ha;
	int scheduled = 0, tries = 100;

	/* ata: promote lun reset to bus reset */
	if (dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);

	while (!scheduled && tries--) {
		spin_lock_irq(&ha->lock);
		if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
		    !test_bit(reset_type, &dev->state)) {
			list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
			set_bit(SAS_DEV_EH_PENDING, &dev->state);
			set_bit(reset_type, &dev->state);
			int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
			scsi_schedule_eh(ha->core.shost);
		spin_unlock_irq(&ha->lock);

	SAS_DPRINTK("%s reset of %s failed\n",
		    reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
		    dev_name(&dev->rphy->dev));
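/*
 * SCSI EH abort handler: only valid when called from the EH thread; hands
 * the abort to the LLDD's lldd_abort_task().
 */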
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
	struct sas_task *task = TO_SAS_TASK(cmd);
	struct Scsi_Host *host = cmd->device->host;
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)

	if (!i->dft->lldd_abort_task)

	res = i->dft->lldd_abort_task(task);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)

EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);

	int_to_scsilun(cmd->device->lun, &lun);

	if (!i->dft->lldd_lu_reset)

	res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
	struct Scsi_Host *host = cmd->device->host;
	struct domain_device *dev = cmd_to_domain_dev(cmd);
	struct sas_internal *i = to_sas_internal(host->transportt);

	if (current != host->ehandler)
		return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);

	if (!i->dft->lldd_I_T_nexus_reset)

	res = i->dft->lldd_I_T_nexus_reset(dev);
	if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
	struct Scsi_Host *shost = cmd->device->host;

	if (!shost->hostt->eh_device_reset_handler)

	res = shost->hostt->eh_device_reset_handler(cmd);

	if (shost->hostt->eh_bus_reset_handler)
		return shost->hostt->eh_bus_reset_handler(cmd);
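/*
 * Core of strategy handling for commands that still own a sas_task: find
 * out where each task is and escalate from task abort through LU, I_T
 * nexus, port and finally HA-wide nexus resets until the command can be
 * completed.
 */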
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
	struct scsi_cmnd *cmd, *n;
	enum task_disposition res = TASK_IS_DONE;
	int tmf_resp, need_reset;
	struct sas_internal *i = to_sas_internal(shost->transportt);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* clean out any commands that won the completion vs eh race */
	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *dev = cmd_to_domain_dev(cmd);
		struct sas_task *task;

		spin_lock_irqsave(&dev->done_lock, flags);
		/* by this point the lldd has either observed
		 * SAS_HA_FROZEN and is leaving the task alone, or has
		 * won the race with eh and decided to complete it
		 */
		task = TO_SAS_TASK(cmd);
		spin_unlock_irqrestore(&dev->done_lock, flags);

			list_move_tail(&cmd->eh_entry, &done);

	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct sas_task *task = TO_SAS_TASK(cmd);

		list_del_init(&cmd->eh_entry);

		spin_lock_irqsave(&task->task_state_lock, flags);
		need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

			SAS_DPRINTK("%s: task 0x%p requests reset\n",

		SAS_DPRINTK("trying to find task 0x%p\n", task);
		res = sas_scsi_find_task(task);

		case TASK_IS_NOT_AT_HA:
			SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
				    cmd->retries ? "retry" : "aborted");
			sas_eh_finish_cmd(cmd);
			SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
			sas_eh_defer_cmd(cmd);
		case TASK_IS_ABORTED:
			SAS_DPRINTK("%s: task 0x%p is aborted\n",
			sas_eh_defer_cmd(cmd);
			SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
			tmf_resp = sas_recover_lu(task->dev, cmd);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
				SAS_DPRINTK("dev %016llx LU %llx is "
				sas_eh_defer_cmd(cmd);
				sas_scsi_clear_queue_lu(work_q, cmd);
		case TASK_IS_NOT_AT_LU:
		case TASK_ABORT_FAILED:
			SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
			tmf_resp = sas_recover_I_T(task->dev);
			if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
			    tmf_resp == -ENODEV) {
				struct domain_device *dev = task->dev;
				SAS_DPRINTK("I_T %016llx recovered\n",
					    SAS_ADDR(task->dev->sas_addr));
				sas_eh_finish_cmd(cmd);
				sas_scsi_clear_queue_I_T(work_q, dev);
			/* Hammer time :-) */
			try_to_reset_cmd_device(cmd);
			if (i->dft->lldd_clear_nexus_port) {
				struct asd_sas_port *port = task->dev->port;
				SAS_DPRINTK("clearing nexus for port:%d\n",
				res = i->dft->lldd_clear_nexus_port(port);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus port:%d "
						    "succeeded\n", port->id);
					sas_eh_finish_cmd(cmd);
					sas_scsi_clear_queue_port(work_q,
			if (i->dft->lldd_clear_nexus_ha) {
				SAS_DPRINTK("clear nexus ha\n");
				res = i->dft->lldd_clear_nexus_ha(ha);
				if (res == TMF_RESP_FUNC_COMPLETE) {
					SAS_DPRINTK("clear nexus ha "
					sas_eh_finish_cmd(cmd);
			/* If we are here -- this means that no amount
			 * of effort could recover from errors. Quite
			 * possibly the HA just disappeared.
			 */
			SAS_DPRINTK("error from device %llx, LUN %llx "
				    "couldn't be recovered in any way\n",
				    SAS_ADDR(task->dev->sas_addr),

			sas_eh_finish_cmd(cmd);

	list_splice_tail(&done, work_q);
	list_splice_tail_init(&ha->eh_ata_q, work_q);

	SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
	list_for_each_entry_safe(cmd, n, work_q, eh_entry)
		sas_eh_finish_cmd(cmd);
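/*
 * Execute the LU and I_T nexus resets that sas_queue_reset() parked on
 * ha->eh_dev_q, dropping ha->lock around the actual LLDD calls.
 */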
static void sas_eh_handle_resets(struct Scsi_Host *shost)
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_internal *i = to_sas_internal(shost->transportt);

	/* handle directed resets to sas devices */
	spin_lock_irq(&ha->lock);
	while (!list_empty(&ha->eh_dev_q)) {
		struct domain_device *dev;
		struct ssp_device *ssp;

		ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
		list_del_init(&ssp->eh_list_node);
		dev = container_of(ssp, typeof(*dev), ssp_dev);
		kref_get(&dev->kref);
		WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");

		spin_unlock_irq(&ha->lock);

		if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
			i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);

		if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
			i->dft->lldd_I_T_nexus_reset(dev);

		spin_lock_irq(&ha->lock);
		clear_bit(SAS_DEV_EH_PENDING, &dev->state);

	spin_unlock_irq(&ha->lock);
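/*
 * Strategy handler for the SAS transport: freeze sas_task completions,
 * deal with commands that still have tasks, run the queued directed
 * resets, then hand any ATA work to libata before flushing the done queue.
 */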
void sas_scsi_recover_host(struct Scsi_Host *shost)
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	LIST_HEAD(eh_work_q);

	spin_lock_irq(shost->host_lock);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irq(shost->host_lock);

	SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
		    __func__, atomic_read(&shost->host_busy), shost->host_failed);
	/*
	 * Deal with commands that still have SAS tasks (i.e. they didn't
	 * complete via the normal sas_task completion mechanism),
	 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
	 */
	set_bit(SAS_HA_FROZEN, &ha->state);
	sas_eh_handle_sas_errors(shost, &eh_work_q);
	clear_bit(SAS_HA_FROZEN, &ha->state);
	if (list_empty(&eh_work_q))
	/*
	 * Now deal with SCSI commands that completed ok but have an error
	 * code (and hopefully sense data) attached.  This is roughly what
	 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
	 * command we see here has no sas_task and is thus unknown to the HA.
	 */
	sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
	if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
		scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);

	if (ha->lldd_max_execute_num > 1)
		wake_up_process(ha->core.queue_thread);

	sas_eh_handle_resets(shost);

	/* now link into libata eh --- if we have any ata devices */
	sas_ata_strategy_handler(shost);

	scsi_eh_flush_done_q(&ha->eh_done_q);

	/* check if any new eh work was scheduled during the last run */
	spin_lock_irq(&ha->lock);
	if (ha->eh_active == 0) {
		shost->host_eh_scheduled = 0;
	spin_unlock_irq(&ha->lock);

	SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
		    __func__, atomic_read(&shost->host_busy),
		    shost->host_failed, tries);
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
	scmd_dbg(cmd, "command %p timed out\n", cmd);

	return BLK_EH_NOT_HANDLED;
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
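/* Walk every port's device list to find the domain_device bound to @rphy. */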
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
	struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct domain_device *found_dev = NULL;

	spin_lock_irqsave(&ha->phy_port_lock, flags);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (rphy == dev->rphy) {
				spin_unlock(&port->dev_list_lock);
		spin_unlock(&port->dev_list_lock);
	spin_unlock_irqrestore(&ha->phy_port_lock, flags);
int sas_target_alloc(struct scsi_target *starget)
	struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
	struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);

	kref_get(&found_dev->kref);
	starget->hostdata = found_dev;
#define SAS_DEF_QD 256

int sas_slave_configure(struct scsi_device *scsi_dev)
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	struct sas_ha_struct *sas_ha;

	BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);

	if (dev_is_sata(dev)) {
		ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);

	sas_ha = dev->port->ha;

	sas_read_port_mode_page(scsi_dev);

	if (scsi_dev->tagged_supported) {
		scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
		scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
		SAS_DPRINTK("device %llx, LUN %llx doesn't support "
			    "TCQ\n", SAS_ADDR(dev->sas_addr),
		scsi_dev->tagged_supported = 0;
		scsi_set_tag_type(scsi_dev, 0);
		scsi_deactivate_tcq(scsi_dev, 1);

	scsi_dev->allow_restart = 1;
int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
	struct domain_device *dev = sdev_to_domain_dev(sdev);

	if (dev_is_sata(dev))
		return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,

	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		if (!sdev->tagged_supported)
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);

	if (dev_is_sata(dev))

	if (!scsi_dev->tagged_supported)

	scsi_deactivate_tcq(scsi_dev, 1);

	scsi_set_tag_type(scsi_dev, qt);
	scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
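/*
 * Report a fixed disk geometry (the conventional 255 heads / 63 sectors);
 * the cylinder count is capacity / (255 * 63).
 */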
int sas_bios_param(struct scsi_device *scsi_dev,
		   struct block_device *bdev,
		   sector_t capacity, int *hsc)
	sector_div(capacity, 255*63);
/* ---------- Task Collector Thread implementation ---------- */
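/*
 * Drain the collector queue: batch as many queued tasks as the LLDD queue
 * depth allows and pass them to lldd_execute_task(); anything the LLDD
 * refuses is spliced back onto the head of the queue.
 */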
static void sas_queue(struct sas_ha_struct *sas_ha)
	struct scsi_core *core = &sas_ha->core;
	unsigned long flags;
	struct sas_internal *i = to_sas_internal(core->shost->transportt);

	mutex_lock(&core->task_queue_flush);
	spin_lock_irqsave(&core->task_queue_lock, flags);
	while (!kthread_should_stop() &&
	       !list_empty(&core->task_queue) &&
	       !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {

		can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
		if (can_queue >= 0) {
			can_queue = core->task_queue_size;
			list_splice_init(&core->task_queue, &q);
			struct list_head *a, *n;

			can_queue = sas_ha->lldd_queue_size;
			list_for_each_safe(a, n, &core->task_queue) {
				list_move_tail(a, &q);
				if (--can_queue == 0)
			can_queue = sas_ha->lldd_queue_size;

		core->task_queue_size -= can_queue;
		spin_unlock_irqrestore(&core->task_queue_lock, flags);
			struct sas_task *task = list_entry(q.next,

			res = i->dft->lldd_execute_task(task, can_queue,
				__list_add(&q, task->list.prev, &task->list);

		spin_lock_irqsave(&core->task_queue_lock, flags);
			list_splice_init(&q, &core->task_queue); /*at head*/
			core->task_queue_size += can_queue;
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
	mutex_unlock(&core->task_queue_flush);
/**
 * sas_queue_thread -- The Task Collector thread
 * @_sas_ha: pointer to struct sas_ha
 */
static int sas_queue_thread(void *_sas_ha)
	struct sas_ha_struct *sas_ha = _sas_ha;

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
int sas_init_queue(struct sas_ha_struct *sas_ha)
	struct scsi_core *core = &sas_ha->core;

	spin_lock_init(&core->task_queue_lock);
	mutex_init(&core->task_queue_flush);
	core->task_queue_size = 0;
	INIT_LIST_HEAD(&core->task_queue);

	core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
					 "sas_queue_%d", core->shost->host_no);
	if (IS_ERR(core->queue_thread))
		return PTR_ERR(core->queue_thread);
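/*
 * Stop the collector thread and fail any tasks still sitting on the queue
 * with DID_ABORT.
 */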
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
	unsigned long flags;
	struct scsi_core *core = &sas_ha->core;
	struct sas_task *task, *n;

	kthread_stop(core->queue_thread);

	if (!list_empty(&core->task_queue))
		SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
			    SAS_ADDR(sas_ha->sas_addr));

	spin_lock_irqsave(&core->task_queue_lock, flags);
	list_for_each_entry_safe(task, n, &core->task_queue, list) {
		struct scsi_cmnd *cmd = task->uldd_task;

		list_del_init(&task->list);

		ASSIGN_SAS_TASK(cmd, NULL);
		sas_free_task(task);
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);
	spin_unlock_irqrestore(&core->task_queue_lock, flags);
/*
 * Tell an upper layer that it needs to initiate an abort for a given task.
 * This should only ever be called by an LLDD.
 */
void sas_task_abort(struct sas_task *task)
	struct scsi_cmnd *sc = task->uldd_task;

	/* Escape for libsas internal commands */
		struct sas_task_slow *slow = task->slow_task;

		if (!del_timer(&slow->timer))
		slow->timer.function(slow->timer.data);

	if (dev_is_sata(task->dev)) {
		sas_ata_task_abort(task);
		struct request_queue *q = sc->device->request_queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_abort_request(sc->request);
		spin_unlock_irqrestore(q->queue_lock, flags);
void sas_target_destroy(struct scsi_target *starget)
	struct domain_device *found_dev = starget->hostdata;

	starget->hostdata = NULL;
	sas_put_device(found_dev);
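/* Convert a string of hex characters into the 8-byte binary SAS address. */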
static void sas_parse_addr(u8 *sas_addr, const char *p)
	for (i = 0; i < SAS_ADDR_SIZE; i++) {
		h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
		l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
		sas_addr[i] = (h<<4) | l;
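/*
 * sas_request_addr() below loads a SAS address for @shost from a firmware
 * blob named "sas_addr": at least 16 hex characters, which sas_parse_addr()
 * converts into the binary address.
 */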
#define SAS_STRING_ADDR_SIZE 16

int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
	const struct firmware *fw;

	res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);

	if (fw->size < SAS_STRING_ADDR_SIZE) {

	sas_parse_addr(addr, fw->data);

	release_firmware(fw);

EXPORT_SYMBOL_GPL(sas_request_addr);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);