// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)
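
/*
 * Layout of the hardware link-mode descriptor used throughout this driver.
 * The fields follow how the prep routines below fill them in (header,
 * source/destination address, byte count, channel config, next-link
 * address); chitvl and chext are assumed to sit between chcfg and nxla.
 */
struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};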
struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	enum dma_slave_buswidth src_word_size;
	enum dma_slave_buswidth dst_word_size;
	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)
struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)
/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000
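
/*
 * Each channel owns a 0x40-byte register block; channels 0-7 and 8-15 sit
 * in separate banks, and each bank has its own block of common registers.
 */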
#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700
#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)
#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
#define CHCFG_FILL_DDS(a)		(((a) << 16) & GENMASK(19, 16))
#define CHCFG_FILL_SDS(a)		(((a) << 12) & GENMASK(15, 12))
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64
/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}
/* @which selects the per-channel block (non-zero) or the common block */
static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}
/*
 * -----------------------------------------------------------------------------
 * Initialization
 */
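
/*
 * Chain the per-channel ring of DMAC_NR_LMDESC link-mode descriptors:
 * each descriptor's nxla field holds the DMA address of the next one,
 * and the last descriptor points back at the base so the ring wraps.
 */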
static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	dma_addr_t nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}
/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */
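
/*
 * Walk forward from lmdesc.head and reset every descriptor whose link-valid
 * bit (HEADER_LV) the hardware has already cleared, so those slots can be
 * reused; head is left at the first descriptor that is still marked valid.
 */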
static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}
static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}
static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}
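
/*
 * Each 32-bit DMARS register in the extension block holds the request
 * selector (MID/RID) for two channels, 16 bits apiece; update only the
 * half-word that belongs to channel @nr.
 */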
static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}
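
/*
 * Memory-to-memory transfers use a single link-mode descriptor with the
 * fixed CHCFG_MEM_COPY configuration and no DMARS request; the channel is
 * kicked by software trigger (CHCTRL_STG) instead of a peripheral request.
 */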
static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dmars_register(dmac, channel->index, 0);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}
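
/*
 * Build one link-mode descriptor per scatterlist entry. For DMA_DEV_TO_MEM
 * the source is pinned to the peripheral address and the destination walks
 * the scatterlist; the other direction is the mirror image. Only the last
 * descriptor has CHCFG_DEM cleared, so only it raises the end interrupt.
 */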
static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	channel->chctrl = CHCTRL_SETEN;
}
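
/*
 * Program the hardware for the descriptor currently selected in chan->desc
 * (set up by issue_pending or the completion handler) and start the channel.
 */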
static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}
/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */
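
/*
 * Pre-allocate a fixed pool of software descriptors per channel so the prep
 * routines never have to allocate memory while transfers are being queued.
 */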
static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}
static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}
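
/*
 * Both prep callbacks take a software descriptor from the pre-allocated
 * ld_free pool and park it on ld_queue; they return NULL when the pool is
 * exhausted.
 */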
static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}
static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_get_all_descriptors(&channel->vc, &head);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}
static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}
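
/*
 * Map a dma_slave_buswidth to the encoding used by the CHCFG DDS/SDS fields
 * (the table index, i.e. log2 of the width in bytes); unsupported widths
 * return CHCFG_DS_INVALID.
 */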
static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}
static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->src_per_address = config->src_addr;
	channel->src_word_size = config->src_addr_width;
	channel->dst_per_address = config->dst_addr;
	channel->dst_word_size = config->dst_addr_width;

	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg |= CHCFG_FILL_DDS(val);

	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg |= CHCFG_FILL_SDS(val);

	return 0;
}
static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Intentionally empty: descriptor allocation is done during
	 * alloc_chan_resources and descriptors get freed during
	 * free_chan_resources. The ld_* lists are used to manage the
	 * descriptors and avoid any memory allocation/free during DMA
	 * read/write.
	 */
}
/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */
static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		return;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
}
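
/*
 * The hard IRQ handler only acknowledges the channel (error reset or
 * CLREND); completing the descriptor and starting the next queued transfer
 * are deferred to the threaded handler below.
 */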
static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}

	/* handle DMAERR irq */
	return IRQ_HANDLED;
}
static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}
/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */
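
/*
 * The single DT dma cell packs the request selector in bits [9:0]
 * (MID_RID_MASK) and the CHCFG TM/AM/LVL/HIEN configuration in bits [15:10]
 * (CHCFG_MASK); the filter claims the MID/RID in the modules bitmap so each
 * request line is handed out only once.
 */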
static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}
static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
}
/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */
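
/*
 * Per-channel setup: request the "ch<n>" interrupt, map the channel and
 * common register blocks, allocate the coherent link-descriptor ring and
 * register the channel with the virt-dma framework.
 */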
static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}
static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	unsigned int i;
	int ret;
	int irq;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmac->ext_base))
		return PTR_ERR(dmac->ext_base);

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	channel_num = i ? i - 1 : 0;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}
static int rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);
static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};
module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_LICENSE("GPL v2");