// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aic94xx SAS/SATA Tasks
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 */
9 #include <linux/spinlock.h>
11 #include "aic94xx_sas.h"
12 #include "aic94xx_hwi.h"
14 static void asd_unbuild_ata_ascb(struct asd_ascb *a);
15 static void asd_unbuild_smp_ascb(struct asd_ascb *a);
16 static void asd_unbuild_ssp_ascb(struct asd_ascb *a);
18 static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
22 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
23 asd_ha->seq.can_queue += num;
24 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
27 /* DMA_... to our direction translation.
29 static const u8 data_dir_flags[] = {
30 [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
31 [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
32 [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
33 [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
36 static int asd_map_scatterlist(struct sas_task *task,
40 struct asd_ascb *ascb = task->lldd_task;
41 struct asd_ha_struct *asd_ha = ascb->ha;
42 struct scatterlist *sc;
45 if (task->data_dir == DMA_NONE)
48 if (task->num_scatter == 0) {
49 void *p = task->scatter;
50 dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
53 if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
56 sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
57 sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
58 sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
62 /* STP tasks come from libata which has already mapped
64 if (sas_protocol_ata(task->task_proto))
65 num_sg = task->num_scatter;
67 num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
68 task->num_scatter, task->data_dir);
75 ascb->sg_arr = asd_alloc_coherent(asd_ha,
76 num_sg*sizeof(struct sg_el),
82 for_each_sg(task->scatter, sc, num_sg, i) {
84 &((struct sg_el *)ascb->sg_arr->vaddr)[i];
85 sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc));
86 sg->size = cpu_to_le32((u32)sg_dma_len(sc));
88 sg->flags |= ASD_SG_EL_LIST_EOL;
91 for_each_sg(task->scatter, sc, 2, i) {
93 cpu_to_le64((u64)sg_dma_address(sc));
94 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
96 sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr);
97 sg_arr[1].flags |= ASD_SG_EL_LIST_EOS;
99 memset(&sg_arr[2], 0, sizeof(*sg_arr));
100 sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle);
103 for_each_sg(task->scatter, sc, num_sg, i) {
105 cpu_to_le64((u64)sg_dma_address(sc));
106 sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc));
108 sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL;
113 if (sas_protocol_ata(task->task_proto))
114 dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
115 task->num_scatter, task->data_dir);
119 static void asd_unmap_scatterlist(struct asd_ascb *ascb)
121 struct asd_ha_struct *asd_ha = ascb->ha;
122 struct sas_task *task = ascb->uldd_task;
124 if (task->data_dir == DMA_NONE)
127 if (task->num_scatter == 0) {
128 dma_addr_t dma = (dma_addr_t)
129 le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
130 dma_unmap_single(&ascb->ha->pcidev->dev, dma,
131 task->total_xfer_len, task->data_dir);
135 asd_free_coherent(asd_ha, ascb->sg_arr);
136 if (task->task_proto != SAS_PROTOCOL_STP)
137 dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
138 task->num_scatter, task->data_dir);
141 /* ---------- Task complete tasklet ---------- */
143 static void asd_get_response_tasklet(struct asd_ascb *ascb,
144 struct done_list_struct *dl)
146 struct asd_ha_struct *asd_ha = ascb->ha;
147 struct sas_task *task = ascb->uldd_task;
148 struct task_status_struct *ts = &task->task_status;
150 struct tc_resp_sb_struct {
154 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
156 /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */
157 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
158 struct asd_ascb *escb;
159 struct asd_dma_tok *edb;
162 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
163 escb = asd_tc_index_find(&asd_ha->seq,
164 (int)le16_to_cpu(resp_sb->index_escb));
165 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
168 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
172 ts->buf_valid_size = 0;
173 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
175 if (task->task_proto == SAS_PROTOCOL_SSP) {
176 struct ssp_response_iu *iu =
177 r + 16 + sizeof(struct ssp_frame_hdr);
179 ts->residual = le32_to_cpu(*(__le32 *)r);
181 sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu);
183 struct ata_task_resp *resp = (void *) &ts->buf[0];
185 ts->residual = le32_to_cpu(*(__le32 *)r);
187 if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
188 resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
189 memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
190 ts->buf_valid_size = sizeof(*resp);
194 asd_invalidate_edb(escb, edb_id);
197 static void asd_task_tasklet_complete(struct asd_ascb *ascb,
198 struct done_list_struct *dl)
200 struct sas_task *task = ascb->uldd_task;
201 struct task_status_struct *ts = &task->task_status;
203 u8 opcode = dl->opcode;
205 asd_can_dequeue(ascb->ha, 1);
210 ts->resp = SAS_TASK_COMPLETE;
211 ts->stat = SAS_SAM_STAT_GOOD;
214 ts->resp = SAS_TASK_COMPLETE;
215 ts->stat = SAS_DATA_UNDERRUN;
216 ts->residual = le32_to_cpu(*(__le32 *)dl->status_block);
219 ts->resp = SAS_TASK_COMPLETE;
220 ts->stat = SAS_DATA_OVERRUN;
225 ts->resp = SAS_TASK_COMPLETE;
226 ts->stat = SAS_PROTO_RESPONSE;
227 asd_get_response_tasklet(ascb, dl);
230 ts->resp = SAS_TASK_UNDELIVERED;
231 ts->stat = SAS_OPEN_REJECT;
232 if (dl->status_block[1] & 2)
233 ts->open_rej_reason = 1 + dl->status_block[2];
234 else if (dl->status_block[1] & 1)
235 ts->open_rej_reason = (dl->status_block[2] >> 4)+10;
237 ts->open_rej_reason = SAS_OREJ_UNKNOWN;
240 ts->resp = SAS_TASK_UNDELIVERED;
241 ts->stat = SAS_OPEN_TO;
245 ts->resp = SAS_TASK_UNDELIVERED;
246 ts->stat = SAS_PHY_DOWN;
249 ts->resp = SAS_TASK_COMPLETE;
250 ts->stat = SAS_PHY_DOWN;
256 case TF_SMP_XMIT_RCV_ERR:
257 case TC_ATA_R_ERR_RECV:
258 ts->resp = SAS_TASK_COMPLETE;
259 ts->stat = SAS_INTERRUPTED;
265 ts->resp = SAS_TASK_UNDELIVERED;
266 ts->stat = SAS_DEV_NO_RESPONSE;
269 ts->resp = SAS_TASK_COMPLETE;
270 ts->stat = SAS_NAK_R_ERR;
272 case TA_I_T_NEXUS_LOSS:
273 opcode = dl->status_block[0];
275 case TF_INV_CONN_HANDLE:
276 ts->resp = SAS_TASK_UNDELIVERED;
277 ts->stat = SAS_DEVICE_UNKNOWN;
279 case TF_REQUESTED_N_PENDING:
280 ts->resp = SAS_TASK_UNDELIVERED;
281 ts->stat = SAS_PENDING;
283 case TC_TASK_CLEARED:
285 ts->resp = SAS_TASK_COMPLETE;
286 ts->stat = SAS_ABORTED_TASK;
292 case TF_TMF_TAG_FREE:
293 case TF_TMF_TASK_DONE:
294 case TF_TMF_NO_CONN_HANDLE:
297 case TF_DATA_OFFS_ERR:
298 ts->resp = SAS_TASK_UNDELIVERED;
299 ts->stat = SAS_DEV_NO_RESPONSE;
302 case TC_LINK_ADM_RESP:
305 case TC_PARTIAL_SG_LIST:
307 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
311 switch (task->task_proto) {
312 case SAS_PROTOCOL_SATA:
313 case SAS_PROTOCOL_STP:
314 asd_unbuild_ata_ascb(ascb);
316 case SAS_PROTOCOL_SMP:
317 asd_unbuild_smp_ascb(ascb);
319 case SAS_PROTOCOL_SSP:
320 asd_unbuild_ssp_ascb(ascb);
326 spin_lock_irqsave(&task->task_state_lock, flags);
327 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
328 task->task_state_flags |= SAS_TASK_STATE_DONE;
329 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
330 struct completion *completion = ascb->completion;
331 spin_unlock_irqrestore(&task->task_state_lock, flags);
332 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
333 "stat 0x%x but aborted by upper layer!\n",
334 task, opcode, ts->resp, ts->stat);
336 complete(completion);
338 spin_unlock_irqrestore(&task->task_state_lock, flags);
339 task->lldd_task = NULL;
342 task->task_done(task);
346 /* ---------- ATA ---------- */
348 static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
351 struct domain_device *dev = task->dev;
358 if (unlikely(task->ata_task.device_control_reg_update))
359 scb->header.opcode = CONTROL_ATA_DEV;
360 else if (dev->sata_dev.class == ATA_DEV_ATAPI)
361 scb->header.opcode = INITIATE_ATAPI_TASK;
363 scb->header.opcode = INITIATE_ATA_TASK;
365 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
366 if (dev->port->oob_mode == SAS_OOB_MODE)
367 scb->ata_task.proto_conn_rate |= dev->linkrate;
369 scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
370 scb->ata_task.fis = task->ata_task.fis;
371 if (likely(!task->ata_task.device_control_reg_update))
372 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
373 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
374 if (dev->sata_dev.class == ATA_DEV_ATAPI)
375 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
377 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
378 scb->ata_task.conn_handle = cpu_to_le16(
379 (u16)(unsigned long)dev->lldd_dev);
381 if (likely(!task->ata_task.device_control_reg_update)) {
383 if (task->ata_task.dma_xfer)
384 flags |= DATA_XFER_MODE_DMA;
385 if (task->ata_task.use_ncq &&
386 dev->sata_dev.class != ATA_DEV_ATAPI)
387 flags |= ATA_Q_TYPE_NCQ;
388 flags |= data_dir_flags[task->data_dir];
389 scb->ata_task.ata_flags = flags;
391 scb->ata_task.retry_count = task->ata_task.retry_count;
394 if (task->ata_task.set_affil_pol)
395 flags |= SET_AFFIL_POLICY;
396 if (task->ata_task.stp_affil_pol)
397 flags |= STP_AFFIL_POLICY;
398 scb->ata_task.flags = flags;
400 ascb->tasklet_complete = asd_task_tasklet_complete;
402 if (likely(!task->ata_task.device_control_reg_update))
403 res = asd_map_scatterlist(task, scb->ata_task.sg_element,
/* Undo asd_build_ata_ascb(): release the task's DMA mappings. */
static void asd_unbuild_ata_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
414 /* ---------- SMP ---------- */
416 static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
419 struct asd_ha_struct *asd_ha = ascb->ha;
420 struct domain_device *dev = task->dev;
423 dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
425 dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
430 scb->header.opcode = INITIATE_SMP_TASK;
432 scb->smp_task.proto_conn_rate = dev->linkrate;
434 scb->smp_task.smp_req.bus_addr =
435 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
436 scb->smp_task.smp_req.size =
437 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
439 scb->smp_task.smp_resp.bus_addr =
440 cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
441 scb->smp_task.smp_resp.size =
442 cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
444 scb->smp_task.sister_scb = cpu_to_le16(0xFFFF);
445 scb->smp_task.conn_handle = cpu_to_le16((u16)
446 (unsigned long)dev->lldd_dev);
448 ascb->tasklet_complete = asd_task_tasklet_complete;
453 static void asd_unbuild_smp_ascb(struct asd_ascb *a)
455 struct sas_task *task = a->uldd_task;
458 dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
460 dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
464 /* ---------- SSP ---------- */
466 static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
469 struct domain_device *dev = task->dev;
475 scb->header.opcode = INITIATE_SSP_TASK;
477 scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */
478 scb->ssp_task.proto_conn_rate |= dev->linkrate;
479 scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len);
480 scb->ssp_task.ssp_frame.frame_type = SSP_DATA;
481 memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr,
482 HASHED_SAS_ADDR_SIZE);
483 memcpy(scb->ssp_task.ssp_frame.hashed_src_addr,
484 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
485 scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
487 memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
488 if (task->ssp_task.enable_first_burst)
489 scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
490 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
491 scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
492 memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
493 task->ssp_task.cmd->cmd_len);
495 scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF);
496 scb->ssp_task.conn_handle = cpu_to_le16(
497 (u16)(unsigned long)dev->lldd_dev);
498 scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
499 scb->ssp_task.retry_count = scb->ssp_task.retry_count;
501 ascb->tasklet_complete = asd_task_tasklet_complete;
503 res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags);
/* Undo asd_build_ssp_ascb(): release the task's DMA mappings. */
static void asd_unbuild_ssp_ascb(struct asd_ascb *a)
{
	asd_unmap_scatterlist(a);
}
513 /* ---------- Execute Task ---------- */
515 static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
520 spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags);
521 if ((asd_ha->seq.can_queue - num) < 0)
522 res = -SAS_QUEUE_FULL;
524 asd_ha->seq.can_queue -= num;
525 spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
530 int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
534 struct sas_task *t = task;
535 struct asd_ascb *ascb = NULL, *a;
536 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
538 res = asd_can_queue(asd_ha, 1);
543 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
549 __list_add(&alist, ascb->list.prev, &ascb->list);
550 list_for_each_entry(a, &alist, list) {
555 list_for_each_entry(a, &alist, list) {
558 if (t->task_proto & SAS_PROTOCOL_STP)
559 t->task_proto = SAS_PROTOCOL_STP;
560 switch (t->task_proto) {
561 case SAS_PROTOCOL_SATA:
562 case SAS_PROTOCOL_STP:
563 res = asd_build_ata_ascb(a, t, gfp_flags);
565 case SAS_PROTOCOL_SMP:
566 res = asd_build_smp_ascb(a, t, gfp_flags);
568 case SAS_PROTOCOL_SSP:
569 res = asd_build_ssp_ascb(a, t, gfp_flags);
572 asd_printk("unknown sas_task proto: 0x%x\n",
580 list_del_init(&alist);
582 res = asd_post_ascb_list(asd_ha, ascb, 1);
585 __list_add(&alist, ascb->list.prev, &ascb->list);
592 struct asd_ascb *b = a;
593 list_for_each_entry(a, &alist, list) {
597 switch (t->task_proto) {
598 case SAS_PROTOCOL_SATA:
599 case SAS_PROTOCOL_STP:
600 asd_unbuild_ata_ascb(a);
602 case SAS_PROTOCOL_SMP:
603 asd_unbuild_smp_ascb(a);
605 case SAS_PROTOCOL_SSP:
606 asd_unbuild_ssp_ascb(a);
614 list_del_init(&alist);
617 asd_ascb_free_list(ascb);
618 asd_can_dequeue(asd_ha, 1);