/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <mach/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG		16
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc	vdesc;
	struct list_head	node;
	int			absync;
	int			pset_nr;
	struct edmacc_param	pset[0];
};

struct edma_chan {
	struct virt_dma_chan	vchan;
	struct list_head	node;
	struct edma_desc	*edesc;
	struct edma_cc		*ecc;
	int			ch_num;
	bool			alloced;
	int			slot[EDMA_MAX_SLOTS];
	dma_addr_t		addr;
	int			addr_width;
	int			maxburst;
};

struct edma_cc {
	int			ctlr;
	struct dma_device	dma_slave;
	struct edma_chan	slave_chans[EDMA_CHANS];
	int			dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
	*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
	struct edma_desc *edesc;
	int i;

	/* Nothing queued: stop the channel and bail out */
	if (!vdesc) {
		edma_stop(echan->ch_num);
		return;
	}

	list_del(&vdesc->node);

	echan->edesc = edesc = to_edma_desc(&vdesc->tx);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < edesc->pset_nr; i++) {
		edma_write_slot(echan->slot[i], &edesc->pset[i]);
		dev_dbg(echan->vchan.chan.device->dev,
			"pset[%d]: chnum %d slot %d abcnt %08x bidx %08x cidx %08x lkrld %08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].a_b_cnt,
			edesc->pset[i].src_dst_bidx,
			edesc->pset[i].src_dst_cidx,
			edesc->pset[i].link_bcntrld);
		/* Link to the next slot if not the last set */
		if (i != (edesc->pset_nr - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
		/* Final pset links to the dummy pset */
		else
			edma_link(echan->slot[i], echan->ecc->dummy_slot);
	}

	edma_start(echan->ch_num);
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *config)
{
	if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	if (config->direction == DMA_MEM_TO_DEV) {
		if (config->dst_addr)
			echan->addr = config->dst_addr;
		if (config->dst_addr_width)
			echan->addr_width = config->dst_addr_width;
		if (config->dst_maxburst)
			echan->maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_DEV_TO_MEM) {
		if (config->src_addr)
			echan->addr = config->src_addr;
		if (config->src_addr_width)
			echan->addr_width = config->src_addr_width;
		if (config->src_maxburst)
			echan->maxburst = config->src_maxburst;
	}

	return 0;
}

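/*
 * Example (illustrative sketch, not part of this driver): a client driver
 * would normally set these parameters through the generic
 * dmaengine_slave_config() helper rather than calling the above directly.
 * The device address, bus width and maxburst below are hypothetical values
 * for a MEM_TO_DEV (TX) FIFO:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	(device FIFO address, example)
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
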
static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	struct scatterlist *sg;
	int i;
	int acnt, bcnt, ccnt, src, dst, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	if (sg_len > MAX_NR_SG) {
		dev_err(dev, "Exceeded max SG segments %d > %d\n",
			sg_len, MAX_NR_SG);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	for_each_sg(sgl, sg, sg_len, i) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "Failed to allocate slot\n");
				kfree(edesc);
				return NULL;
			}
		}

		acnt = echan->addr_width;

		/*
		 * If the maxburst is equal to the fifo width, use
		 * A-synced transfers. This allows for large contiguous
		 * buffer transfers using only one PaRAM set.
		 */
		if (echan->maxburst == 1) {
			edesc->absync = false;
			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
			cidx = acnt;
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		} else {
			edesc->absync = true;
			bcnt = echan->maxburst;
			ccnt = sg_dma_len(sg) / (acnt * bcnt);
			if (ccnt > (SZ_64K - 1)) {
				dev_err(dev, "Exceeded max SG segment size\n");
				kfree(edesc);
				return NULL;
			}
			cidx = acnt * bcnt;
		}

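		/*
		 * Worked example (illustrative, assuming a 32-bit FIFO):
		 * with acnt = 4 and maxburst = 8, the AB-synced case
		 * programs bcnt = 8, so each frame moves 4 * 8 = 32 bytes
		 * and ccnt = sg_dma_len(sg) / 32, which must stay below
		 * SZ_64K - 1. A 64 KiB segment therefore needs
		 * ccnt = 65536 / 32 = 2048 frames from a single PaRAM set.
		 */
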
		/* Program source/destination and their B/C-dim strides */
		if (direction == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = echan->addr;
			src_bidx = acnt;
			src_cidx = cidx;
			dst_bidx = 0;
			dst_cidx = 0;
		} else {
			src = echan->addr;
			dst = sg_dma_address(sg);
			src_bidx = 0;
			src_cidx = 0;
			dst_bidx = acnt;
			dst_cidx = cidx;
		}

		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
		/* Configure A or AB synchronized transfers */
		if (edesc->absync)
			edesc->pset[i].opt |= SYNCDIM;
		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;

		edesc->pset[i].src = src;
		edesc->pset[i].dst = dst;

		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
		edesc->pset[i].ccnt = ccnt;
		edesc->pset[i].link_bcntrld = 0xffffffff;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

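/*
 * Example (illustrative sketch, not part of this driver): once a channel
 * has been requested and configured, a client submits a scatter-gather
 * transfer through the generic dmaengine helpers. "sgt", "my_done_callback"
 * and "my_ctx" below are hypothetical client-side names:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgt.sgl, sgt.nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = my_done_callback;
 *		txd->callback_param = my_ctx;
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */
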
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;

	/* Stop the channel */
	edma_stop(echan->ch_num);

	switch (ch_status) {
	case DMA_COMPLETE:
		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);

		spin_lock_irqsave(&echan->vchan.lock, flags);
		edesc = echan->edesc;
		if (edesc) {
			edma_execute(echan);
			vchan_cookie_complete(&edesc->vdesc);
		}
		spin_unlock_irqrestore(&echan->vchan.lock, flags);
		break;
	case DMA_CC_ERROR:
		dev_dbg(dev, "transfer error on channel %d\n", ch_num);
		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int a_ch_num;
	int ret;

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);
	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

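/*
 * Example (illustrative sketch, not part of this driver): a client can poll
 * completion and the remaining byte count through the generic status helper;
 * "cookie" is the value returned by dmaengine_submit():
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_SUCCESS)
 *		pr_debug("still in flight, %u bytes left\n", state.residue);
 */
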
static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int __devinit edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);
	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int __devexit edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= __devexit_p(edma_remove),
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);

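/*
 * Example (illustrative sketch, not part of this driver): a client requests
 * an EDMA channel by passing edma_filter_fn and the desired channel number
 * (typically taken from platform data; "pdata->dma_ch" below is hypothetical)
 * to the generic channel allocator:
 *
 *	dma_cap_mask_t mask;
 *	unsigned int dma_ch = pdata->dma_ch;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &dma_ch);
 *	if (!chan)
 *		return -ENODEV;
 */
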
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
	.dma_mask = DMA_BIT_MASK(32),
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
		}
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");