// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "fsl-edma-common.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

#define EDMA_TCD		0x1000
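
/*
 * Each channel owns one 32-byte transfer control descriptor (TCD) in the
 * register file starting at EDMA_TCD, i.e. channel n is programmed at
 * membase + EDMA_TCD + n * sizeof(struct fsl_edma_hw_tcd).
 */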

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/* ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (enable)
		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
	else
		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
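
/*
 * Worked example of the mux arithmetic above, assuming the usual Vybrid
 * layout of n_chans = 32 and DMAMUX_NR = 2: chans_per_mux is 16, so
 * channel 21 is routed through byte register 5 (21 % 16) of the second
 * DMAMUX block (muxbase[21 / 16] = muxbase[1]).
 */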

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
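
/*
 * The channel is normally configured from a client driver through the
 * generic dmaengine wrapper; a minimal sketch (the peripheral FIFO
 * address and widths are illustrative, not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= rx_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */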

static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out the finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}
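
/*
 * Worked example of the residue logic above: a descriptor of four TCDs
 * moving 0x100 bytes each starts with len = 0x400; if the engine is
 * 0x40 bytes into the third TCD, the loop subtracts the first three
 * sizes and adds back the unfinished tail, so the residue is
 * 0x400 - 0x300 + (0x100 - 0x40) = 0x1c0.
 */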

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian obeying the eDMA engine model endian.
	 */
	edma_writew(edma, 0, &regs->tcd[ch].csr);
	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);

	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);

	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);

	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);

	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
			&regs->tcd[ch].dlast_sga);

	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
}

void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory, waiting
	 * for fsl_edma_set_tcd_regs doing the swap.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));

	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}
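
/*
 * When enable_sg is set, EDMA_TCD_CSR_E_SG makes the engine reload this
 * channel's TCD from the physical address in dlast_sga once the major
 * loop completes; this is how the prep routines below chain TCDs in
 * hardware without CPU intervention.
 */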

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(sizeof(*fsl_desc) +
			   sizeof(struct fsl_edma_sw_tcd) *
			   sg_len, GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
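
/*
 * A minimal sketch of how an audio or serial client would request a
 * cyclic transfer through the dmaengine wrapper (buffer names are
 * illustrative, not part of this driver):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_DEV_TO_MEM,
 *					 DMA_PREP_INTERRUPT);
 *
 * Each period becomes one TCD above; since the last TCD's dlast_sga
 * points back at the first, the ring keeps running until the client
 * terminates the channel.
 */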

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->cfg.dst_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->cfg.src_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
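
/*
 * Note the asymmetry between the two fill calls above: intermediate TCDs
 * chain via E_SG and raise no interrupt, while the final TCD clears E_SG
 * and sets INT_MAJOR plus D_REQ, so the channel stops its hardware
 * request and signals completion after the last segment.
 */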

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				&dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
 * register offsets are different compared to ColdFire mcf5441x 64 channels
 * edma (here called "v2").
 *
 * This function sets up register offsets as per proper declared version
 * so must be called in xxx_edma_probe() just after setting the
 * edma "version" and "membase" appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->version == v1) ?
			EDMA_SERQ : EDMA64_SERQ);
	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
			EDMA_CERQ : EDMA64_CERQ);
	edma->regs.seei = edma->membase + ((edma->version == v1) ?
			EDMA_SEEI : EDMA64_SEEI);
	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
			EDMA_CEEI : EDMA64_CEEI);
	edma->regs.cint = edma->membase + ((edma->version == v1) ?
			EDMA_CINT : EDMA64_CINT);
	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
			EDMA_CERR : EDMA64_CERR);
	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
			EDMA_SSRT : EDMA64_SSRT);
	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
			EDMA_CDNE : EDMA64_CDNE);
	edma->regs.intl = edma->membase + ((edma->version == v1) ?
			EDMA_INTR : EDMA64_INTL);
	edma->regs.errl = edma->membase + ((edma->version == v1) ?
			EDMA_ERR : EDMA64_ERRL);

	if (edma->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");