2 * Copyright © 2006, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/types.h>
22 #include <asm/hardware.h>
23 #include <asm/hardware/iop_adma.h>
25 /* Memory copy units */
/*
 * Per-channel MMIO register offsets, relative to chan->mmr_base.
 * Naming follows the IOP3xx convention: CCR = channel control,
 * CSR = channel status, DAR = descriptor address, NDAR = next
 * descriptor address, BCR = byte count, DCR = descriptor control;
 * PADR/PUADR/LADR are the PCI (lower/upper) and local addresses.
 * NOTE(review): offsets assumed to match the IOP3xx datasheet — confirm.
 */
26 #define DMA_CCR(chan) (chan->mmr_base + 0x0)
27 #define DMA_CSR(chan) (chan->mmr_base + 0x4)
28 #define DMA_DAR(chan) (chan->mmr_base + 0xc)
29 #define DMA_NDAR(chan) (chan->mmr_base + 0x10)
30 #define DMA_PADR(chan) (chan->mmr_base + 0x14)
31 #define DMA_PUADR(chan) (chan->mmr_base + 0x18)
32 #define DMA_LADR(chan) (chan->mmr_base + 0x1c)
33 #define DMA_BCR(chan) (chan->mmr_base + 0x20)
34 #define DMA_DCR(chan) (chan->mmr_base + 0x24)
36 /* Application accelerator unit */
/*
 * AAU (XOR engine) registers: ACR = accelerator control, ASR = status,
 * ADAR = descriptor address, ANDAR = next descriptor address,
 * SAR(n) = source address n, DAR = destination, ABCR = byte count,
 * ADCR = descriptor control.
 */
37 #define AAU_ACR(chan) (chan->mmr_base + 0x0)
38 #define AAU_ASR(chan) (chan->mmr_base + 0x4)
39 #define AAU_ADAR(chan) (chan->mmr_base + 0x8)
40 #define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
41 #define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
42 #define AAU_DAR(chan) (chan->mmr_base + 0x20)
43 #define AAU_ABCR(chan) (chan->mmr_base + 0x24)
44 #define AAU_ADCR(chan) (chan->mmr_base + 0x28)
/*
 * NOTE(review): unlike AAU_SAR above, this macro expands `chan' without
 * taking it as a parameter — callers must have a variable named `chan'
 * in scope.  Also note `src_edc' is used without parenthesization.
 */
45 #define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* Indices into the aau descriptor's src_edc[] array where the three
 * extended descriptor control (EDCR) words live. */
46 #define AAU_EDCR0_IDX 8
47 #define AAU_EDCR1_IDX 17
48 #define AAU_EDCR2_IDX 26
/*
 * Bit layout of the AAU descriptor-control word (written to the hardware
 * descriptor's desc_ctrl field via a union with a u32 elsewhere in this
 * file).  Field meanings grounded in how the init helpers below use them:
 * blkN_cmd_ctrl selects the per-source-block command (e.g. 0x2 = memory
 * block fill, 0x7 = direct fill), blk_ctrl selects mini-descriptor/EDCR
 * usage (0x1 mini-desc, 0x2 EDCR0, 0x3 EDCR[2:0]), zero_result_en/err
 * drive the zero-sum check, dest_write_en gates the result write-back.
 */
54 struct iop3xx_aau_desc_ctrl {
55 unsigned int int_en:1;
56 unsigned int blk1_cmd_ctrl:3;
57 unsigned int blk2_cmd_ctrl:3;
58 unsigned int blk3_cmd_ctrl:3;
59 unsigned int blk4_cmd_ctrl:3;
60 unsigned int blk5_cmd_ctrl:3;
61 unsigned int blk6_cmd_ctrl:3;
62 unsigned int blk7_cmd_ctrl:3;
63 unsigned int blk8_cmd_ctrl:3;
64 unsigned int blk_ctrl:2;
65 unsigned int dual_xor_en:1;
66 unsigned int tx_complete:1;
67 unsigned int zero_result_err:1;
68 unsigned int zero_result_en:1;
69 unsigned int dest_write_en:1;
/*
 * Bit layout of an AAU *extended* descriptor-control word (EDCR).  Each
 * EDCR adds command-control bits for eight further source blocks; see
 * iop3xx_desc_init_xor, which fills these for src_cnt > 8.
 */
72 struct iop3xx_aau_e_desc_ctrl {
73 unsigned int reserved:1;
74 unsigned int blk1_cmd_ctrl:3;
75 unsigned int blk2_cmd_ctrl:3;
76 unsigned int blk3_cmd_ctrl:3;
77 unsigned int blk4_cmd_ctrl:3;
78 unsigned int blk5_cmd_ctrl:3;
79 unsigned int blk6_cmd_ctrl:3;
80 unsigned int blk7_cmd_ctrl:3;
81 unsigned int blk8_cmd_ctrl:3;
82 unsigned int reserved2:7;
/*
 * Bit layout of the DMA (memcpy) descriptor-control word.  Usage visible
 * in iop_desc_init_memcpy: pci_transaction = 0xe selects a memory read
 * block, mem_to_mem_en enables local memory-to-memory transfers.
 * CRC fields presumably drive the channel's CRC engine — not exercised
 * by the code visible in this file.
 */
85 struct iop3xx_dma_desc_ctrl {
86 unsigned int pci_transaction:4;
87 unsigned int int_en:1;
88 unsigned int dac_cycle_en:1;
89 unsigned int mem_to_mem_en:1;
90 unsigned int crc_data_tx_en:1;
91 unsigned int crc_gen_en:1;
92 unsigned int crc_seed_dis:1;
93 unsigned int reserved:21;
94 unsigned int crc_tx_complete:1;
/*
 * In-memory hardware descriptor layouts.  The desc_ctrl_field members
 * alias the raw control word with the bitfield structs above (the
 * surrounding unions are not visible in this view).
 */
97 struct iop3xx_desc_dma {
105 u32 upper_pci_src_addr;
106 u32 upper_pci_dest_addr;
109 u32 local_pci_src_addr;
110 u32 local_pci_dest_addr;
116 struct iop3xx_dma_desc_ctrl desc_ctrl_field;
121 struct iop3xx_desc_aau {
128 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
133 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
/* Galois-field multiplier coefficients, one byte per source (P+Q ops). */
137 struct iop3xx_aau_gfmr {
138 unsigned int gfmr1:8;
139 unsigned int gfmr2:8;
140 unsigned int gfmr3:8;
141 unsigned int gfmr4:8;
144 struct iop3xx_desc_pq_xor {
149 struct iop3xx_aau_gfmr data_mult1_field;
155 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
160 struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
162 struct iop3xx_aau_gfmr data_mult_field;
167 struct iop3xx_desc_dual_xor {
177 struct iop3xx_aau_desc_ctrl desc_ctrl_field;
/* Members of `union iop3xx_desc' — the view that code below constructs
 * with `{ .ptr = desc->hw_desc }' to access a slot as any descriptor
 * type (the union header itself is outside this view). */
183 struct iop3xx_desc_aau *aau;
184 struct iop3xx_desc_dma *dma;
185 struct iop3xx_desc_pq_xor *pq_xor;
186 struct iop3xx_desc_dual_xor *dual_xor;
/* Maximum number of xor sources supported by this hardware (body not
 * visible in this view). */
190 static inline int iop_adma_get_max_xor(void)
/* Read the hardware's current-descriptor pointer.  The surrounding
 * switch (elided here) dispatches on chan->device->id: DMA channels
 * read DMA_DAR, the AAU reads AAU_ADAR. */
195 static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
197 int id = chan->device->id;
202 return __raw_readl(DMA_DAR(chan));
204 return __raw_readl(AAU_ADAR(chan));
/* Program the hardware's next-descriptor pointer — DMA_NDAR for DMA
 * channels, AAU_ANDAR for the AAU (dispatch on device id, elided). */
211 static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
214 int id = chan->device->id;
219 __raw_writel(next_desc_addr, DMA_NDAR(chan));
222 __raw_writel(next_desc_addr, AAU_ANDAR(chan));
/* Channel-active bit in the status register, and per-operation hardware
 * byte-count ceilings (zero-sum is limited to 1 KiB per descriptor;
 * copy/xor to 16 MiB). */
228 #define IOP_ADMA_STATUS_BUSY (1 << 10)
229 #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
230 #define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
231 #define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
/* Nonzero when the channel-active bit is set in the status register. */
233 static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
235 u32 status = __raw_readl(DMA_CSR(chan));
236 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
/* A descriptor is usable for an op needing num_slots slots only if its
 * pool index is a multiple of num_slots (power-of-two mask test). */
239 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
242 /* num_slots will only ever be 1, 2, 4, or 8 */
243 return (desc->idx & (num_slots - 1)) ? 0 : 1;
246 /* to do: support large (i.e. > hw max) buffer sizes */
247 static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
253 /* to do: support large (i.e. > hw max) buffer sizes */
254 static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
/* Slots needed per xor operation as a function of source count:
 * 1 slot covers up to 4 sources, 2 up to 8, 4 up to 16, 8 up to 32.
 * Index 0 of the table is unused padding.
 * NOTE(review): `const static' is legal but unconventional ordering;
 * kernel style is `static const'. */
260 static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
263 const static int slot_count_table[] = { 0,
264 1, 1, 1, 1, /* 01 - 04 */
265 2, 2, 2, 2, /* 05 - 08 */
266 4, 4, 4, 4, /* 09 - 12 */
267 4, 4, 4, 4, /* 13 - 16 */
268 8, 8, 8, 8, /* 17 - 20 */
269 8, 8, 8, 8, /* 21 - 24 */
270 8, 8, 8, 8, /* 25 - 28 */
271 8, 8, 8, 8, /* 29 - 32 */
273 *slots_per_op = slot_count_table[src_cnt];
274 return *slots_per_op;
/* Slot count for a pure-interrupt (null) descriptor: delegate to the
 * zero-length memcpy (DMA channels) or a 2-source xor (AAU) count,
 * depending on which engine the channel drives. */
278 iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
280 switch (chan->device->id) {
283 return iop_chan_memcpy_slot_count(0, slots_per_op);
285 return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
/* Total slots for an xor of `len' bytes: one slots_per_op group per
 * IOP_ADMA_XOR_MAX_BYTE_COUNT chunk, plus one for the remainder. */
292 static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
295 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
297 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
300 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
301 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
302 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
303 slot_cnt += *slots_per_op;
307 slot_cnt += *slots_per_op;
312 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
/* Same chunking scheme as xor_slot_count, but against the much smaller
 * 1 KiB zero-sum hardware limit. */
315 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
318 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
320 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
323 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
324 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
325 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
326 slot_cnt += *slots_per_op;
330 slot_cnt += *slots_per_op;
/* Read the destination address from a hardware descriptor, picking the
 * DMA or AAU layout based on the channel's device id. */
335 static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
336 struct iop_adma_chan *chan)
338 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
340 switch (chan->device->id) {
343 return hw_desc.dma->dest_addr;
345 return hw_desc.aau->dest_addr;
/* As above, but for the descriptor's byte count. */
352 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
353 struct iop_adma_chan *chan)
355 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
357 switch (chan->device->id) {
360 return hw_desc.dma->byte_count;
362 return hw_desc.aau->byte_count;
369 /* translate the src_idx to a descriptor word index */
370 static inline int __desc_idx(int src_idx)
372 const static int desc_idx_table[] = { 0, 0, 0, 0,
382 return desc_idx_table[src_idx];
/* Read source address src_idx: DMA descriptors have a single src_addr;
 * AAU descriptors keep the first sources in src[] and later ones in the
 * src_edc[] array, remapped through __desc_idx to skip EDCR words. */
385 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
386 struct iop_adma_chan *chan,
389 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
391 switch (chan->device->id) {
394 return hw_desc.dma->src_addr;
402 return hw_desc.aau->src[src_idx];
404 return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
/* Write-side counterpart for AAU descriptors (same src[]/src_edc[]
 * split as the getter above). */
407 static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
408 int src_idx, dma_addr_t addr)
411 hw_desc->src[src_idx] = addr;
413 hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
/* Initialize a DMA descriptor for a local memory-to-memory copy.
 * The control word is built in a zeroed u32/bitfield union (the union
 * declaration is elided in this view); pci_transaction = 0xe selects a
 * memory read block command.  int_en requests a completion interrupt. */
417 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
419 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
422 struct iop3xx_dma_desc_ctrl field;
425 u_desc_ctrl.value = 0;
426 u_desc_ctrl.field.mem_to_mem_en = 1;
427 u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
428 u_desc_ctrl.field.int_en = int_en;
429 hw_desc->desc_ctrl = u_desc_ctrl.value;
430 hw_desc->upper_pci_src_addr = 0;
431 hw_desc->crc_addr = 0;
/* Initialize an AAU descriptor for a block fill (memset): block-1
 * command 0x2 = memory block fill, with the destination write enabled. */
435 iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
437 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
440 struct iop3xx_aau_desc_ctrl field;
443 u_desc_ctrl.value = 0;
444 u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
445 u_desc_ctrl.field.dest_write_en = 1;
446 u_desc_ctrl.field.int_en = int_en;
447 hw_desc->desc_ctrl = u_desc_ctrl.value;
/*
 * Initialize an AAU descriptor for an xor of src_cnt sources, returning
 * the raw control word so callers (e.g. zero-sum init) can tweak it.
 * Sources beyond the first 8 need extended control words: the cascaded
 * src_cnt checks (elided here) fill EDCR2 for sources 25-32, EDCR1 for
 * 17-24 and EDCR0 for 9-16, setting blk_ctrl to record which EDCRs the
 * hardware must fetch (0x3 = EDCR[2:0], 0x2 = EDCR0 only, 0x1 = the
 * mini-descriptor format for 5-8 sources).  Each loop packs one 3-bit
 * per-source command into `edcr'/the control word at a shift that is
 * advanced in elided code.
 */
451 iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
457 struct iop3xx_aau_desc_ctrl field;
460 u_desc_ctrl.value = 0;
463 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
466 for (i = 24; i < src_cnt; i++) {
467 edcr |= (1 << shift);
470 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
474 if (!u_desc_ctrl.field.blk_ctrl) {
475 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
476 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
480 for (i = 16; i < src_cnt; i++) {
481 edcr |= (1 << shift);
484 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
488 if (!u_desc_ctrl.field.blk_ctrl)
489 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
492 for (i = 8; i < src_cnt; i++) {
493 edcr |= (1 << shift);
496 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
/* Sources 1-8 are encoded directly in the main control word. */
501 for (i = 0; i < src_cnt; i++) {
502 u_desc_ctrl.value |= (1 << shift);
506 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
507 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
510 u_desc_ctrl.field.dest_write_en = 1;
511 u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
512 u_desc_ctrl.field.int_en = int_en;
513 hw_desc->desc_ctrl = u_desc_ctrl.value;
515 return u_desc_ctrl.value;
/* Thin wrapper used by the generic iop-adma driver. */
519 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
521 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
524 /* return the number of operations */
/*
 * Initialize a multi-descriptor zero-sum (xor-and-check) operation.
 * Each hardware op covers at most 1 KiB, so the slot allocation is
 * walked in slots_per_op strides; every constituent descriptor is first
 * set up as an xor, then modified to suppress the destination write and
 * enable the zero-result check.
 */
526 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
528 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
529 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
532 struct iop3xx_aau_desc_ctrl field;
536 hw_desc = desc->hw_desc;
538 for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
539 i += slots_per_op, j++) {
540 iter = iop_hw_desc_slot_idx(hw_desc, i);
541 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
542 u_desc_ctrl.field.dest_write_en = 0;
543 u_desc_ctrl.field.zero_result_en = 1;
544 u_desc_ctrl.field.int_en = int_en;
545 iter->desc_ctrl = u_desc_ctrl.value;
547 /* for the subsequent descriptors preserve the store queue
548 * and chain them together
/* Link the previous descriptor to this one; `i << 5' implies each
 * slot is 32 bytes in the physically contiguous pool — confirm
 * against the slot allocator. */
552 iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
553 prev_hw_desc->next_desc =
554 (u32) (desc->async_tx.phys + (i << 5));
/*
 * Initialize a "null" xor descriptor: same EDCR/blk_ctrl selection as
 * iop3xx_desc_init_xor (cascaded src_cnt checks elided), but all
 * extended control words are zeroed and the destination write is
 * disabled, so the op consumes sources without producing data — used
 * for channel-priming and interrupt-only descriptors.
 */
562 iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
564 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
567 struct iop3xx_aau_desc_ctrl field;
570 u_desc_ctrl.value = 0;
573 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
574 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
577 if (!u_desc_ctrl.field.blk_ctrl) {
578 hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
579 u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
581 hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
584 if (!u_desc_ctrl.field.blk_ctrl)
585 u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
586 hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
589 if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
590 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
593 u_desc_ctrl.field.dest_write_en = 0;
594 u_desc_ctrl.field.int_en = int_en;
595 hw_desc->desc_ctrl = u_desc_ctrl.value;
/* Store the transfer byte count in the descriptor, using the DMA or AAU
 * layout as selected by the channel's device id. */
598 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
599 struct iop_adma_chan *chan,
602 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
604 switch (chan->device->id) {
607 hw_desc.dma->byte_count = byte_count;
610 hw_desc.aau->byte_count = byte_count;
/* Build an interrupt-only descriptor: a zero-length memcpy on DMA
 * channels, or a two-source null-xor on the AAU, with int_en forced on
 * and all addresses/counts cleared. */
618 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
619 struct iop_adma_chan *chan)
621 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
623 switch (chan->device->id) {
626 iop_desc_init_memcpy(desc, 1);
627 hw_desc.dma->byte_count = 0;
628 hw_desc.dma->dest_addr = 0;
629 hw_desc.dma->src_addr = 0;
632 iop_desc_init_null_xor(desc, 2, 1);
633 hw_desc.aau->byte_count = 0;
634 hw_desc.aau->dest_addr = 0;
635 hw_desc.aau->src[0] = 0;
636 hw_desc.aau->src[1] = 0;
/*
 * Distribute `len' over the chained zero-sum descriptors: each full
 * descriptor takes the 1 KiB hardware maximum and the final one takes
 * the remainder (iteration bookkeeping partially elided in this view).
 */
644 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
646 int slots_per_op = desc->slots_per_op;
647 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
650 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
651 hw_desc->byte_count = len;
654 iter = iop_hw_desc_slot_idx(hw_desc, i);
655 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
656 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
658 } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
661 iter = iop_hw_desc_slot_idx(hw_desc, i);
662 iter->byte_count = len;
/* Store the destination address, dispatching on the channel's layout. */
667 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
668 struct iop_adma_chan *chan,
671 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
673 switch (chan->device->id) {
676 hw_desc.dma->dest_addr = addr;
679 hw_desc.aau->dest_addr = addr;
/* DMA descriptors carry a single source address. */
686 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
689 struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
690 hw_desc->src_addr = addr;
/* Set source src_idx on every constituent zero-sum descriptor,
 * advancing the address by the 1 KiB per-descriptor chunk size. */
694 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
698 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
699 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
702 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
703 i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
704 iter = iop_hw_desc_slot_idx(hw_desc, i);
705 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
/* Same walk for xor operations, with the 16 MiB xor chunk stride. */
709 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
710 int src_idx, dma_addr_t addr)
713 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
714 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
717 for (i = 0; (slot_cnt -= slots_per_op) >= 0;
718 i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
719 iter = iop_hw_desc_slot_idx(hw_desc, i);
720 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
/* Chain this descriptor to the next.  The BUG_ON catches re-linking an
 * already-chained descriptor. */
724 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
727 /* hw_desc->next_desc is the same location for all channels */
728 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
729 BUG_ON(hw_desc.dma->next_desc);
730 hw_desc.dma->next_desc = next_desc_addr;
733 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
735 /* hw_desc->next_desc is the same location for all channels */
736 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
737 return hw_desc.dma->next_desc;
740 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
742 /* hw_desc->next_desc is the same location for all channels */
743 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
744 hw_desc.dma->next_desc = 0;
/* For block fill (memset), the fill pattern rides in the first AAU
 * source-address slot. */
747 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
750 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
751 hw_desc->src[0] = val;
/* Read back the zero-sum verdict.  The BUG_ON asserts the descriptor
 * both completed (tx_complete) and was configured as a zero-sum op
 * before the error flag is trusted. */
754 static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
756 struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
757 struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;
759 BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
760 return desc_ctrl.zero_result_err;
/* Tell the hardware new descriptors were appended: set bit 1 of the
 * channel control register (presumably the chain-resume/append bit —
 * confirm against the IOP3xx manual).  The watchdog timer papers over
 * dropped completion interrupts on these parts. */
763 static inline void iop_chan_append(struct iop_adma_chan *chan)
766 /* workaround dropped interrupts on 3xx */
767 mod_timer(&chan->cleanup_watchdog, jiffies + msecs_to_jiffies(3));
769 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
770 dma_chan_ctrl |= 0x2;
771 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
/* Channel has gone idle: the dropped-interrupt watchdog can be stopped. */
774 static inline void iop_chan_idle(int busy, struct iop_adma_chan *chan)
777 del_timer(&chan->cleanup_watchdog);
780 static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
782 return __raw_readl(DMA_CSR(chan));
/* Enable/disable flip bits in the channel control register (the bit
 * manipulation lines are elided in this view). */
785 static inline void iop_chan_disable(struct iop_adma_chan *chan)
787 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
789 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
792 static inline void iop_chan_enable(struct iop_adma_chan *chan)
794 u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
797 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
/* Acknowledge end-of-transfer / end-of-chain conditions by writing the
 * (masked, masking elided) status value back — the CSR appears to be
 * write-to-clear; confirm against the datasheet. */
800 static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
802 u32 status = __raw_readl(DMA_CSR(chan));
804 __raw_writel(status, DMA_CSR(chan));
807 static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
809 u32 status = __raw_readl(DMA_CSR(chan));
811 __raw_writel(status, DMA_CSR(chan));
/* Clear error bits; the mask of error bits depends on which unit the
 * channel drives (other device-id cases elided). */
814 static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
816 u32 status = __raw_readl(DMA_CSR(chan));
818 switch (chan->device->id) {
821 status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
830 __raw_writel(status, DMA_CSR(chan));
/*
 * Per-error-type status-bit predicates used by the generic iop-adma
 * error handler.  Bit positions are per the masks used in
 * iop_adma_device_clear_err_status above; several bodies and the
 * device-id-specific cases are elided in this view.
 */
834 iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
840 iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
846 iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
/* Internal-bus master abort lives at bit 5. */
852 iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
854 return test_bit(5, &status);
/* PCI target abort (bit 2) — only meaningful on channels whose device
 * id matches the elided case labels. */
858 iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
860 switch (chan->device->id) {
863 return test_bit(2, &status);
/* PCI master abort (bit 3), same device-id gating. */
870 iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
872 switch (chan->device->id) {
875 return test_bit(3, &status);
882 iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
884 switch (chan->device->id) {
887 return test_bit(1, &status);