-#ifdef CONFIG_BFIN_SPI_NO_DMA
-# define SPI_DMA 0
-#else
-# define SPI_DMA 1
-#endif
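-/* CONFIG_BFIN_SPI_NO_DMA disables the DMA transfer path in this driver at build time. */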
-
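-/*
- * Half-duplex DMA transfer: exactly one of tx and rx is expected to be
- * non-NULL.  Returns 0 on success, or -1 if the user interrupts the
- * transfer with Ctrl-C.
- */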
-static int spi_dma_xfer(struct bfin_spi_slave *bss, const u8 *tx, u8 *rx,
-			uint bytes)
-{
-	int ret = -1;
-	u16 ndsize, spi_config, dma_config;
-	struct dmasg dmasg[2];
-	const u8 *buf;
-
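-	/*
-	 * Pick the transfer direction: point the SPI at DMA-driven TDBR
-	 * writes for transmit, or DMA-driven RDBR reads for receive, and
-	 * mark the DMA channel as a memory write (WNR) when receiving.
-	 */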
-	if (tx) {
-		debug("%s: doing half duplex TX\n", __func__);
-		buf = tx;
-		spi_config = TDBR_DMA;
-		dma_config = 0;
-	} else {
-		debug("%s: doing half duplex RX\n", __func__);
-		buf = rx;
-		spi_config = RDBR_DMA;
-		dma_config = WNR;
-	}
-
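-	/*
-	 * A single descriptor's x_count is only 16 bits wide, so transfers
-	 * of up to 64KiB use one 1D descriptor.  Larger transfers use a 2D
-	 * descriptor for the full 64KiB blocks plus a second 1D descriptor
-	 * for the remainder.  The buffer is flushed/invalidated so the DMA
-	 * engine and the data cache stay coherent.
-	 */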
-	dmasg[0].start_addr = (unsigned long)buf;
-	dmasg[0].x_modify = 1;
-	dma_config |= WDSIZE_8 | DMAEN;
-	if (bytes <= 65536) {
-		blackfin_dcache_flush_invalidate_range(buf, buf + bytes);
-		ndsize = NDSIZE_5;
-		dmasg[0].cfg = NDSIZE_0 | dma_config | FLOW_STOP | DI_EN;
-		dmasg[0].x_count = bytes;
-	} else {
-		blackfin_dcache_flush_invalidate_range(buf, buf + 65536 - 1);
-		ndsize = NDSIZE_7;
-		dmasg[0].cfg = NDSIZE_5 | dma_config | FLOW_ARRAY | DMA2D;
-		dmasg[0].x_count = 0;	/* 2^16 */
-		dmasg[0].y_count = bytes >> 16;	/* count / 2^16 */
-		dmasg[0].y_modify = 1;
-		dmasg[1].start_addr = (unsigned long)(buf + (bytes & ~0xFFFF));
-		dmasg[1].cfg = NDSIZE_0 | dma_config | FLOW_STOP | DI_EN;
-		dmasg[1].x_count = bytes & 0xFFFF;	/* count % 2^16 */
-		dmasg[1].x_modify = 1;
-	}
-
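-	/*
-	 * Quiesce the DMA channel, acknowledge any stale completion/error
-	 * status, point it at the descriptor array, drop the core-driven
-	 * transfer mode from SPI_CTL and clear the SPI status before
-	 * kicking off the transfer.
-	 */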
-	dma->cfg = 0;
-	dma->irq_status = DMA_DONE | DMA_ERR;
-	dma->curr_desc_ptr = dmasg;
-	write_SPI_CTL(bss, (bss->ctl & ~TDBR_CORE));
-	write_SPI_STAT(bss, -1);
-	SSYNC();
-
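-	/*
-	 * Preload TDBR with the idle pattern so a receive-only transfer
-	 * still shifts out a defined value, then enable the DMA channel in
-	 * descriptor-array mode and switch SPI_CTL to the DMA transfer mode
-	 * selected above to start the transfer.
-	 */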
-	write_SPI_TDBR(bss, CONFIG_BFIN_SPI_IDLE_VAL);
-	dma->cfg = ndsize | FLOW_ARRAY | DMAEN;
-	write_SPI_CTL(bss, (bss->ctl & ~TDBR_CORE) | spi_config);
-	SSYNC();
-
-	/*
-	 * The first 64KiB was already flushed and invalidated above; take
-	 * care of the remaining part now, while the DMA is running.  It is
-	 * unlikely that the DMA will overtake us.
-	 */
-	if (bytes > 65536)
-		blackfin_dcache_flush_invalidate_range(buf + 65536, buf + bytes);
-
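-	/* Busy-wait for the descriptor chain to complete; Ctrl-C aborts. */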
-	while (!(dma->irq_status & DMA_DONE))
-		if (ctrlc())
-			goto done;
-
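-	/* Transfer complete: disable the DMA channel again. */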
-	dma->cfg = 0;
-
-	ret = 0;
- done:
-	write_SPI_CTL(bss, bss->ctl);
-	return ret;
-}
-