/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS      2
#define EDMA_CHANS      32
#else
#define EDMA_CTLRS      1
#define EDMA_CHANS      64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG               16
#define EDMA_MAX_SLOTS          MAX_NR_SG
#define EDMA_DESCRIPTORS        16

struct edma_desc {
        struct virt_dma_desc            vdesc;
        struct list_head                node;
        int                             absync;
        int                             pset_nr;
        struct edmacc_param             pset[0];
};

struct edma_cc;

struct edma_chan {
        struct virt_dma_chan            vchan;
        struct list_head                node;
        struct edma_desc                *edesc;
        struct edma_cc                  *ecc;
        int                             ch_num;
        bool                            alloced;
        int                             slot[EDMA_MAX_SLOTS];
        struct dma_slave_config         cfg;
};

struct edma_cc {
        int                             ctlr;
        struct dma_device               dma_slave;
        struct edma_chan                slave_chans[EDMA_CHANS];
        int                             dummy_slot;
};

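/*
 * Note: edma_desc carries its PaRAM sets in a trailing flexible array,
 * so a single allocation sized as
 *      sizeof(struct edma_desc) + pset_nr * sizeof(struct edmacc_param)
 * covers the descriptor and all of its sets; see the kzalloc() in
 * edma_prep_slave_sg() below.
 */
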
static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
        return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
        return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
        struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
        struct edma_desc *edesc;
        int i;

        /* Nothing queued: mark the channel idle */
        if (!vdesc) {
                echan->edesc = NULL;
                return;
        }

        list_del(&vdesc->node);

        echan->edesc = edesc = to_edma_desc(&vdesc->tx);

        /* Write descriptor PaRAM set(s) */
        for (i = 0; i < edesc->pset_nr; i++) {
                edma_write_slot(echan->slot[i], &edesc->pset[i]);
                dev_dbg(echan->vchan.chan.device->dev,
                        "pset[%d]: chnum %d slot %d opt %08x src %08x dst %08x abcnt %08x ccnt %08x bidx %08x cidx %08x lkrld %08x\n",
                        i, echan->ch_num, echan->slot[i],
                        edesc->pset[i].opt,
                        edesc->pset[i].src,
                        edesc->pset[i].dst,
                        edesc->pset[i].a_b_cnt,
                        edesc->pset[i].ccnt,
                        edesc->pset[i].src_dst_bidx,
                        edesc->pset[i].src_dst_cidx,
                        edesc->pset[i].link_bcntrld);
                /* Link to the next slot if not the last set */
                if (i != (edesc->pset_nr - 1))
                        edma_link(echan->slot[i], echan->slot[i+1]);
                /* Final pset links to the dummy pset */
                else
                        edma_link(echan->slot[i], echan->ecc->dummy_slot);
        }

        edma_start(echan->ch_num);
}

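/*
 * Illustration (not in the original source): for a three-set descriptor
 * the loop above produces the chain
 *
 *      slot[0] -> slot[1] -> slot[2] -> ecc->dummy_slot
 *
 * so the channel reloads each successive PaRAM set on its own and then
 * parks on the dummy set, which triggers no further transfers, once the
 * last real set has completed.
 */
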
static int edma_terminate_all(struct edma_chan *echan)
{
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&echan->vchan.lock, flags);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after edma_stop() returns (even if it does, it will see
         * echan->edesc is NULL and exit.)
         */
        if (echan->edesc) {
                echan->edesc = NULL;
                edma_stop(echan->ch_num);
        }

        vchan_get_all_descriptors(&echan->vchan, &head);
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
        vchan_dma_desc_free_list(&echan->vchan, &head);

        return 0;
}

static int edma_slave_config(struct edma_chan *echan,
        struct dma_slave_config *cfg)
{
        /* The EDMA hardware cannot do 8-byte accesses */
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

        return 0;
}

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                        unsigned long arg)
{
        int ret = 0;
        struct dma_slave_config *config;
        struct edma_chan *echan = to_edma_chan(chan);

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                edma_terminate_all(echan);
                break;
        case DMA_SLAVE_CONFIG:
                config = (struct dma_slave_config *)arg;
                ret = edma_slave_config(echan, config);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

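/*
 * Illustrative client usage (the FIFO address and widths here are
 * assumptions, not taken from this driver): a slave device driver
 * reaches edma_slave_config() through the generic dmaengine wrapper,
 * which issues device_control(chan, DMA_SLAVE_CONFIG, ...):
 *
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = fifo_dma_addr,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 8,
 *      };
 *
 *      ret = dmaengine_slave_config(chan, &cfg);
 */
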
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long tx_flags, void *context)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edma_desc *edesc;
        dma_addr_t dev_addr;
        enum dma_slave_buswidth dev_width;
        u32 burst;
        struct scatterlist *sg;
        int i;
        int acnt, bcnt, ccnt, src, dst, cidx;
        int src_bidx, dst_bidx, src_cidx, dst_cidx;

        if (unlikely(!echan || !sgl || !sg_len))
                return NULL;

        if (direction == DMA_DEV_TO_MEM) {
                dev_addr = echan->cfg.src_addr;
                dev_width = echan->cfg.src_addr_width;
                burst = echan->cfg.src_maxburst;
        } else if (direction == DMA_MEM_TO_DEV) {
                dev_addr = echan->cfg.dst_addr;
                dev_width = echan->cfg.dst_addr_width;
                burst = echan->cfg.dst_maxburst;
        } else {
                dev_err(dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
                dev_err(dev, "Undefined slave buswidth\n");
                return NULL;
        }

        if (sg_len > MAX_NR_SG) {
                dev_err(dev, "Exceeded max SG segments %d > %d\n",
                        sg_len, MAX_NR_SG);
                return NULL;
        }

        edesc = kzalloc(sizeof(*edesc) + sg_len *
                sizeof(edesc->pset[0]), GFP_ATOMIC);
        if (!edesc) {
                dev_dbg(dev, "Failed to allocate a descriptor\n");
                return NULL;
        }

        edesc->pset_nr = sg_len;

        for_each_sg(sgl, sg, sg_len, i) {
                /* Allocate a PaRAM slot, if needed */
                if (echan->slot[i] < 0) {
                        echan->slot[i] =
                                edma_alloc_slot(EDMA_CTLR(echan->ch_num),
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
                                dev_err(dev, "Failed to allocate slot\n");
                                kfree(edesc);
                                return NULL;
                        }
                }

                acnt = dev_width;

                /*
                 * If the maxburst is equal to the fifo width, use
                 * A-synced transfers. This allows for large contiguous
                 * buffer transfers using only one PaRAM set.
                 */
                if (burst == 1) {
                        edesc->absync = false;
                        ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
                        bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
                        if (bcnt)
                                ccnt++;
                        else
                                bcnt = SZ_64K - 1;
                        cidx = acnt;
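                        /*
                         * Worked example (illustrative, not from the
                         * original source): a 1048560-byte segment with
                         * a 4-byte FIFO width is 262140 elements, so
                         *      ccnt = 262140 / 65535     = 4
                         *      bcnt = 262140 - 4 * 65535 = 0 -> SZ_64K - 1
                         * and a single PaRAM set moves
                         *      acnt * bcnt * ccnt = 4 * 65535 * 4
                         * = 1048560 bytes.
                         */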
                /*
                 * If maxburst is greater than the fifo address_width,
                 * use AB-synced transfers where A count is the fifo
                 * address_width and B count is the maxburst. In this
                 * case, we are limited to transfers of C count frames
                 * of (address_width * maxburst) where C count is limited
                 * to SZ_64K-1. This places an upper bound on the length
                 * of an SG segment that can be handled.
                 */
                } else {
                        edesc->absync = true;
                        bcnt = burst;
                        ccnt = sg_dma_len(sg) / (acnt * bcnt);
                        if (ccnt > (SZ_64K - 1)) {
                                dev_err(dev, "Exceeded max SG segment size\n");
                                kfree(edesc);
                                return NULL;
                        }
                        cidx = acnt * bcnt;
                }

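                /*
                 * Illustrative bound (not from the original source):
                 * with a 4-byte FIFO and maxburst 8, each frame is
                 * acnt * bcnt = 32 bytes, so one AB-synced PaRAM set
                 * caps the segment at 32 * 65535 = 2097120 bytes.
                 */
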
                if (direction == DMA_MEM_TO_DEV) {
                        src = sg_dma_address(sg);
                        dst = dev_addr;
                        src_bidx = acnt;
                        src_cidx = cidx;
                        dst_bidx = 0;
                        dst_cidx = 0;
                } else {
                        src = dev_addr;
                        dst = sg_dma_address(sg);
                        src_bidx = 0;
                        src_cidx = 0;
                        dst_bidx = acnt;
                        dst_cidx = cidx;
                }

                edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
                /* Configure A or AB synchronized transfers */
                if (edesc->absync)
                        edesc->pset[i].opt |= SYNCDIM;
                /* If this is the last set, enable completion interrupt flag */
                if (i == sg_len - 1)
                        edesc->pset[i].opt |= TCINTEN;

                edesc->pset[i].src = src;
                edesc->pset[i].dst = dst;

                edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
                edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

                edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
                edesc->pset[i].ccnt = ccnt;
                edesc->pset[i].link_bcntrld = 0xffffffff;
        }

        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

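/*
 * Field packing used in the pset writes above (low half-word first):
 *      a_b_cnt      = BCNT << 16 | ACNT
 *      src_dst_bidx = DSTBIDX << 16 | SRCBIDX
 *      src_dst_cidx = DSTCIDX << 16 | SRCCIDX
 */
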
static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
        struct edma_chan *echan = data;
        struct device *dev = echan->vchan.chan.device->dev;
        struct edma_desc *edesc;
        unsigned long flags;

        /* Stop the channel */
        edma_stop(echan->ch_num);

        switch (ch_status) {
        case DMA_COMPLETE:
                dev_dbg(dev, "transfer complete on channel %d\n", ch_num);

                spin_lock_irqsave(&echan->vchan.lock, flags);

                edesc = echan->edesc;
                if (edesc) {
                        /* Start the next descriptor, then complete this one */
                        edma_execute(echan);
                        vchan_cookie_complete(&edesc->vdesc);
                }

                spin_unlock_irqrestore(&echan->vchan.lock, flags);

                break;
        case DMA_CC_ERROR:
                dev_dbg(dev, "transfer error on channel %d\n", ch_num);
                break;
        default:
                break;
        }
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        int ret;
        int a_ch_num;

        a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
                                        chan, EVENTQ_DEFAULT);

        if (a_ch_num < 0) {
                ret = -ENODEV;
                goto err_no_chan;
        }

        if (a_ch_num != echan->ch_num) {
                dev_err(dev, "failed to allocate requested channel %u:%u\n",
                        EDMA_CTLR(echan->ch_num),
                        EDMA_CHAN_SLOT(echan->ch_num));
                ret = -ENODEV;
                goto err_wrong_chan;
        }

        echan->alloced = true;
        echan->slot[0] = echan->ch_num;

        dev_info(dev, "allocated channel for %u:%u\n",
                 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

        return 0;

err_wrong_chan:
        edma_free_channel(a_ch_num);
err_no_chan:
        return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        int i;

        /* Terminate transfers */
        edma_stop(echan->ch_num);

        vchan_free_chan_resources(&echan->vchan);

        /* Free EDMA PaRAM slots */
        for (i = 1; i < EDMA_MAX_SLOTS; i++) {
                if (echan->slot[i] >= 0) {
                        edma_free_slot(echan->slot[i]);
                        echan->slot[i] = -1;
                }
        }

        /* Free EDMA channel */
        if (echan->alloced) {
                edma_free_channel(echan->ch_num);
                echan->alloced = false;
        }

        dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&echan->vchan.lock, flags);
        if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
                edma_execute(echan);
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static size_t edma_desc_size(struct edma_desc *edesc)
{
        int i;
        size_t size;

        if (edesc->absync)
                for (size = i = 0; i < edesc->pset_nr; i++)
                        size += (edesc->pset[i].a_b_cnt & 0xffff) *
                                (edesc->pset[i].a_b_cnt >> 16) *
                                edesc->pset[i].ccnt;
        else
                size = (edesc->pset[0].a_b_cnt & 0xffff) *
                        (edesc->pset[0].a_b_cnt >> 16) +
                        (edesc->pset[0].a_b_cnt & 0xffff) *
                        (SZ_64K - 1) * edesc->pset[0].ccnt;

        return size;
}

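/*
 * Worked example (illustrative): an AB-synced pset with ACNT = 4,
 * BCNT = 8 and CCNT = 100 contributes 4 * 8 * 100 = 3200 bytes to the
 * residue reported by edma_tx_status() below.
 */
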
/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
                                      dma_cookie_t cookie,
                                      struct dma_tx_state *txstate)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS || !txstate)
                return ret;

        spin_lock_irqsave(&echan->vchan.lock, flags);
        vdesc = vchan_find_desc(&echan->vchan, cookie);
        if (vdesc) {
                txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
        } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
                struct edma_desc *edesc = echan->edesc;
                txstate->residue = edma_desc_size(edesc);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&echan->vchan.lock, flags);

        return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
                                  struct dma_device *dma,
                                  struct edma_chan *echans)
{
        int i, j;

        for (i = 0; i < EDMA_CHANS; i++) {
                struct edma_chan *echan = &echans[i];
                echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
                echan->ecc = ecc;
                echan->vchan.desc_free = edma_desc_free;

                vchan_init(&echan->vchan, dma);

                INIT_LIST_HEAD(&echan->node);
                /* Mark all PaRAM slots as unallocated */
                for (j = 0; j < EDMA_MAX_SLOTS; j++)
                        echan->slot[j] = -1;
        }
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
                          struct device *dev)
{
        dma->device_prep_slave_sg = edma_prep_slave_sg;
        dma->device_alloc_chan_resources = edma_alloc_chan_resources;
        dma->device_free_chan_resources = edma_free_chan_resources;
        dma->device_issue_pending = edma_issue_pending;
        dma->device_tx_status = edma_tx_status;
        dma->device_control = edma_control;
        dma->dev = dev;

        INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
        struct edma_cc *ecc;
        int ret;

        ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
        if (!ecc) {
                dev_err(&pdev->dev, "Can't allocate controller\n");
                return -ENOMEM;
        }

        ecc->ctlr = pdev->id;
        ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
        if (ecc->dummy_slot < 0) {
                dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
                return -EIO;
        }

        dma_cap_zero(ecc->dma_slave.cap_mask);
        dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

        edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

        edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

        ret = dma_async_device_register(&ecc->dma_slave);
        if (ret)
                goto err_reg1;

        platform_set_drvdata(pdev, ecc);

        dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

        return 0;

err_reg1:
        edma_free_slot(ecc->dummy_slot);
        return ret;
}

static int edma_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct edma_cc *ecc = dev_get_drvdata(dev);

        dma_async_device_unregister(&ecc->dma_slave);
        edma_free_slot(ecc->dummy_slot);

        return 0;
}

static struct platform_driver edma_driver = {
        .probe          = edma_probe,
        .remove         = edma_remove,
        .driver = {
                .name = "edma-dma-engine",
                .owner = THIS_MODULE,
        },
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &edma_driver.driver) {
                struct edma_chan *echan = to_edma_chan(chan);
                unsigned ch_req = *(unsigned *)param;
                return ch_req == echan->ch_num;
        }
        return false;
}
EXPORT_SYMBOL(edma_filter_fn);

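/*
 * Illustrative client usage (the event number is hypothetical, not part
 * of this file): claim the channel wired to EDMA event 20 through the
 * standard dmaengine filter mechanism:
 *
 *      dma_cap_mask_t mask;
 *      unsigned ch_num = 20;
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */
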
static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
        .name = "edma-dma-engine",
        .id = 0,
};

static const struct platform_device_info edma_dev_info1 = {
        .name = "edma-dma-engine",
        .id = 1,
};

static int edma_init(void)
{
        int ret = platform_driver_register(&edma_driver);

        if (ret == 0) {
                pdev0 = platform_device_register_full(&edma_dev_info0);
                if (IS_ERR(pdev0)) {
                        platform_driver_unregister(&edma_driver);
                        ret = PTR_ERR(pdev0);
                        goto out;
                }
                pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
                pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }

        if (EDMA_CTLRS == 2) {
                pdev1 = platform_device_register_full(&edma_dev_info1);
                if (IS_ERR(pdev1)) {
                        platform_driver_unregister(&edma_driver);
                        platform_device_unregister(pdev0);
                        ret = PTR_ERR(pdev1);
                        goto out;
                }
                pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
                pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }

out:
        return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
        platform_device_unregister(pdev0);
        if (pdev1)
                platform_device_unregister(pdev1);
        platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");