1 // SPDX-License-Identifier: GPL-2.0
3 * Xilinx ZynqMP DPDMA Engine driver
5 * Copyright (C) 2015 - 2020 Xilinx, Inc.
10 #include <linux/bitfield.h>
11 #include <linux/bits.h>
12 #include <linux/clk.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/dma/xilinx_dpdma.h>
16 #include <linux/dmaengine.h>
17 #include <linux/dmapool.h>
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
21 #include <linux/of_dma.h>
22 #include <linux/platform_device.h>
23 #include <linux/sched.h>
24 #include <linux/slab.h>
25 #include <linux/spinlock.h>
26 #include <linux/wait.h>
28 #include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
30 #include "../dmaengine.h"
31 #include "../virt-dma.h"
34 #define XILINX_DPDMA_ERR_CTRL 0x000
35 #define XILINX_DPDMA_ISR 0x004
36 #define XILINX_DPDMA_IMR 0x008
37 #define XILINX_DPDMA_IEN 0x00c
38 #define XILINX_DPDMA_IDS 0x010
39 #define XILINX_DPDMA_INTR_DESC_DONE(n) BIT((n) + 0)
40 #define XILINX_DPDMA_INTR_DESC_DONE_MASK GENMASK(5, 0)
41 #define XILINX_DPDMA_INTR_NO_OSTAND(n) BIT((n) + 6)
42 #define XILINX_DPDMA_INTR_NO_OSTAND_MASK GENMASK(11, 6)
43 #define XILINX_DPDMA_INTR_AXI_ERR(n) BIT((n) + 12)
44 #define XILINX_DPDMA_INTR_AXI_ERR_MASK GENMASK(17, 12)
45 #define XILINX_DPDMA_INTR_DESC_ERR(n) BIT((n) + 18)
46 #define XILINX_DPDMA_INTR_DESC_ERR_MASK GENMASK(23, 18)
47 #define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
48 #define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
49 #define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
50 #define XILINX_DPDMA_INTR_VSYNC BIT(27)
51 #define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x00041000
52 #define XILINX_DPDMA_INTR_CHAN_ERR 0x00fff000
53 #define XILINX_DPDMA_INTR_GLOBAL_ERR 0x07000000
54 #define XILINX_DPDMA_INTR_ERR_ALL 0x07fff000
55 #define XILINX_DPDMA_INTR_CHAN_MASK 0x00041041
56 #define XILINX_DPDMA_INTR_GLOBAL_MASK 0x0f000000
57 #define XILINX_DPDMA_INTR_ALL 0x0fffffff
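/*
 * Each channel owns one bit in each of the DESC_DONE, NO_OSTAND, AXI_ERR and
 * DESC_ERR groups, so channel 0's bits are 0, 6, 12 and 18, which is exactly
 * XILINX_DPDMA_INTR_CHAN_MASK (0x00041041). Shifting the per-channel masks
 * left by the channel ID, as done in xilinx_dpdma_chan_enable() and
 * xilinx_dpdma_chan_disable(), selects that channel's bits.
 */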
58 #define XILINX_DPDMA_EISR 0x014
59 #define XILINX_DPDMA_EIMR 0x018
60 #define XILINX_DPDMA_EIEN 0x01c
61 #define XILINX_DPDMA_EIDS 0x020
62 #define XILINX_DPDMA_EINTR_INV_APB BIT(0)
63 #define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) BIT((n) + 1)
64 #define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK GENMASK(6, 1)
65 #define XILINX_DPDMA_EINTR_PRE_ERR(n) BIT((n) + 7)
66 #define XILINX_DPDMA_EINTR_PRE_ERR_MASK GENMASK(12, 7)
67 #define XILINX_DPDMA_EINTR_CRC_ERR(n) BIT((n) + 13)
68 #define XILINX_DPDMA_EINTR_CRC_ERR_MASK GENMASK(18, 13)
69 #define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) BIT((n) + 19)
70 #define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK GENMASK(24, 19)
71 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) BIT((n) + 25)
72 #define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK GENMASK(30, 25)
73 #define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(31)
74 #define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x02082082
75 #define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
76 #define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
77 #define XILINX_DPDMA_EINTR_ALL 0xffffffff
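/*
 * The same per-channel layout applies to the error interrupt registers:
 * channel 0's error bits are 1, 7, 13, 19 and 25, i.e.
 * XILINX_DPDMA_EINTR_CHAN_ERR_MASK (0x02082082), again shifted by the channel
 * ID when enabling or disabling a channel. Bits 0 (invalid APB access) and 31
 * (read command FIFO full) are the global error bits (0x80000001).
 */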
78 #define XILINX_DPDMA_CNTL 0x100
79 #define XILINX_DPDMA_GBL 0x104
80 #define XILINX_DPDMA_GBL_TRIG_MASK(n) ((n) << 0)
81 #define XILINX_DPDMA_GBL_RETRIG_MASK(n) ((n) << 6)
82 #define XILINX_DPDMA_ALC0_CNTL 0x108
83 #define XILINX_DPDMA_ALC0_STATUS 0x10c
84 #define XILINX_DPDMA_ALC0_MAX 0x110
85 #define XILINX_DPDMA_ALC0_MIN 0x114
86 #define XILINX_DPDMA_ALC0_ACC 0x118
87 #define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
88 #define XILINX_DPDMA_ALC1_CNTL 0x120
89 #define XILINX_DPDMA_ALC1_STATUS 0x124
90 #define XILINX_DPDMA_ALC1_MAX 0x128
91 #define XILINX_DPDMA_ALC1_MIN 0x12c
92 #define XILINX_DPDMA_ALC1_ACC 0x130
93 #define XILINX_DPDMA_ALC1_ACC_TRAN 0x134
95 /* Channel register */
96 #define XILINX_DPDMA_CH_BASE 0x200
97 #define XILINX_DPDMA_CH_OFFSET 0x100
98 #define XILINX_DPDMA_CH_DESC_START_ADDRE 0x000
99 #define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK GENMASK(15, 0)
100 #define XILINX_DPDMA_CH_DESC_START_ADDR 0x004
101 #define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x008
102 #define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0x00c
103 #define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x010
104 #define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x014
105 #define XILINX_DPDMA_CH_CNTL 0x018
106 #define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
107 #define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
108 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK GENMASK(5, 2)
109 #define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK GENMASK(9, 6)
110 #define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK GENMASK(13, 10)
111 #define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
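/*
 * QoS class 11 is programmed into the descriptor write, descriptor read and
 * data read QoS fields of CH_CNTL when a channel is enabled (see
 * xilinx_dpdma_chan_enable()); the macro name and that function's
 * documentation suggest this is the AXI QoS value used for video traffic.
 */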
112 #define XILINX_DPDMA_CH_STATUS 0x01c
113 #define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK GENMASK(24, 21)
114 #define XILINX_DPDMA_CH_VDO 0x020
115 #define XILINX_DPDMA_CH_PYLD_SZ 0x024
116 #define XILINX_DPDMA_CH_DESC_ID 0x028
117 #define XILINX_DPDMA_CH_DESC_ID_MASK GENMASK(15, 0)
119 /* DPDMA descriptor fields */
120 #define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
121 #define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
122 #define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
123 #define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
124 #define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
125 #define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
126 #define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
127 #define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
128 #define XILINX_DPDMA_DESC_ID_MASK GENMASK(15, 0)
129 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK GENMASK(17, 0)
130 #define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK GENMASK(31, 18)
131 #define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK GENMASK(15, 0)
132 #define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK GENMASK(31, 16)
134 #define XILINX_DPDMA_ALIGN_BYTES 256
135 #define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128
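/*
 * DMA buffers must start on a 256-byte (XILINX_DPDMA_ALIGN_BYTES) boundary,
 * which is checked in xilinx_dpdma_chan_prep_interleaved_dma(), and line sizes
 * are rounded up to a multiple of 128 bits (16 bytes) before being written to
 * the descriptor's hsize field.
 */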
137 #define XILINX_DPDMA_NUM_CHAN 6
139 struct xilinx_dpdma_chan;
142 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
143 * @control: control configuration field
144 * @desc_id: descriptor ID
145 * @xfer_size: transfer size
146 * @hsize_stride: horizontal size and stride
147 * @timestamp_lsb: LSB of time stamp
148 * @timestamp_msb: MSB of time stamp
149 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
150 * @next_desc: next descriptor 32 bit address
151 * @src_addr: payload source address (1st page, 32 LSB)
152 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
153 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
154 * @src_addr2: payload source address (2nd page, 32 LSB)
155 * @src_addr3: payload source address (3rd page, 32 LSB)
156 * @src_addr4: payload source address (4th page, 32 LSB)
157 * @src_addr5: payload source address (5th page, 32 LSB)
158 * @crc: descriptor CRC
160 struct xilinx_dpdma_hw_desc {
177 } __aligned(XILINX_DPDMA_ALIGN_BYTES);
180 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
181 * @hw: DPDMA hardware descriptor
182 * @node: list node for software descriptors
183 * @dma_addr: DMA address of the software descriptor
185 struct xilinx_dpdma_sw_desc {
186 struct xilinx_dpdma_hw_desc hw;
187 struct list_head node;
192 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
193 * @vdesc: virtual DMA descriptor
195 * @descriptors: list of software descriptors
196 * @error: an error has been detected with this descriptor
198 struct xilinx_dpdma_tx_desc {
199 struct virt_dma_desc vdesc;
200 struct xilinx_dpdma_chan *chan;
201 struct list_head descriptors;
205 #define to_dpdma_tx_desc(_desc) \
206 container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)
209 * struct xilinx_dpdma_chan - DPDMA channel
210 * @vchan: virtual DMA channel
211 * @reg: register base address
213 * @wait_to_stop: queue to wait for outstanding transactions before stopping
214 * @running: true if the channel is running
215 * @first_frame: flag for the first frame of stream
216 * @video_group: flag if multi-channel operation is needed for video channels
217 * @lock: lock to access struct xilinx_dpdma_chan
218 * @desc_pool: descriptor allocation pool
219 * @err_task: error IRQ bottom half handler
220 * @desc: References to descriptors being processed
221 * @desc.pending: Descriptor scheduled to the hardware, pending execution
222 * @desc.active: Descriptor being executed by the hardware
223 * @xdev: DPDMA device
225 struct xilinx_dpdma_chan {
226 struct virt_dma_chan vchan;
230 wait_queue_head_t wait_to_stop;
235 spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
236 struct dma_pool *desc_pool;
237 struct tasklet_struct err_task;
240 struct xilinx_dpdma_tx_desc *pending;
241 struct xilinx_dpdma_tx_desc *active;
244 struct xilinx_dpdma_device *xdev;
247 #define to_xilinx_chan(_chan) \
248 container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)
251 * struct xilinx_dpdma_device - DPDMA device
252 * @common: generic dma device structure
253 * @reg: register base address
254 * @dev: generic device structure
255 * @irq: the interrupt number
256 * @axi_clk: axi clock
257 * @chan: DPDMA channels
258 * @ext_addr: flag for 64 bit system (48 bit addressing)
260 struct xilinx_dpdma_device {
261 struct dma_device common;
267 struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
272 /* -----------------------------------------------------------------------------
275 #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32
276 #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535"
278 /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
279 enum xilinx_dpdma_testcases {
284 struct xilinx_dpdma_debugfs {
285 enum xilinx_dpdma_testcases testcase;
286 u16 xilinx_dpdma_irq_done_count;
287 unsigned int chan_id;
290 static struct xilinx_dpdma_debugfs dpdma_debugfs;
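/*
 * Single global debugfs state: only one testcase can be active at a time, as
 * enforced in xilinx_dpdma_debugfs_write().
 */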
291 struct xilinx_dpdma_debugfs_request {
293 enum xilinx_dpdma_testcases tc;
294 ssize_t (*read)(char *buf);
295 int (*write)(char *args);
298 static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
300 if (IS_ENABLED(CONFIG_DEBUG_FS) && chan->id == dpdma_debugfs.chan_id)
301 dpdma_debugfs.xilinx_dpdma_irq_done_count++;
304 static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
308 dpdma_debugfs.testcase = DPDMA_TC_NONE;
310 out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
311 out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
313 snprintf(buf, out_str_len, "%d",
314 dpdma_debugfs.xilinx_dpdma_irq_done_count);
319 static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
325 arg = strsep(&args, " ");
326 if (!arg || strncasecmp(arg, "start", 5))
329 arg = strsep(&args, " ");
333 ret = kstrtou32(arg, 0, &id);
337 if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
340 dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
341 dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
342 dpdma_debugfs.chan_id = id;
347 /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
348 static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
350 .name = "DESCRIPTOR_DONE_INTR",
351 .tc = DPDMA_TC_INTR_DONE,
352 .read = xilinx_dpdma_debugfs_desc_done_irq_read,
353 .write = xilinx_dpdma_debugfs_desc_done_irq_write,
357 static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
358 size_t size, loff_t *pos)
360 enum xilinx_dpdma_testcases testcase;
364 if (*pos != 0 || size <= 0)
367 kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
369 dpdma_debugfs.testcase = DPDMA_TC_NONE;
373 testcase = READ_ONCE(dpdma_debugfs.testcase);
374 if (testcase != DPDMA_TC_NONE) {
375 ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
379 strscpy(kern_buff, "No testcase executed",
380 XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
383 size = min(size, strlen(kern_buff));
384 if (copy_to_user(buf, kern_buff, size))
396 static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
397 const char __user *buf, size_t size,
400 char *kern_buff, *kern_buff_start;
405 if (*pos != 0 || size <= 0)
408 /* Only a single test instance is supported at a time. */
409 if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
412 kern_buff = kzalloc(size, GFP_KERNEL);
415 kern_buff_start = kern_buff;
417 ret = strncpy_from_user(kern_buff, buf, size);
421 /* Read the testcase name from a user request. */
422 testcase = strsep(&kern_buff, " ");
424 for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
425 if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
429 if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
434 ret = dpdma_debugfs_reqs[i].write(kern_buff);
441 kfree(kern_buff_start);
445 static const struct file_operations fops_xilinx_dpdma_dbgfs = {
446 .owner = THIS_MODULE,
447 .read = xilinx_dpdma_debugfs_read,
448 .write = xilinx_dpdma_debugfs_write,
451 static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
455 dpdma_debugfs.testcase = DPDMA_TC_NONE;
457 dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
458 NULL, &fops_xilinx_dpdma_dbgfs);
460 dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
463 /* -----------------------------------------------------------------------------
467 static inline u32 dpdma_read(void __iomem *base, u32 offset)
469 return ioread32(base + offset);
472 static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
474 iowrite32(val, base + offset);
477 static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
479 dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
482 static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
484 dpdma_write(base, offset, dpdma_read(base, offset) | set);
487 /* -----------------------------------------------------------------------------
488 * Descriptor Operations
492 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
493 * @xdev: DPDMA device
494 * @sw_desc: The software descriptor in which to set DMA addresses
495 * @prev: The previous descriptor
496 * @dma_addr: array of dma addresses
497 * @num_src_addr: number of addresses in @dma_addr
499 * Set all the DMA addresses in the hardware descriptor corresponding to @sw_desc
500 * from @dma_addr. If a previous descriptor is specified in @prev, its next
501 * descriptor DMA address is set to the DMA address of @sw_desc. @prev may be
502 * identical to @sw_desc for cyclic transfers.
504 static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
505 struct xilinx_dpdma_sw_desc *sw_desc,
506 struct xilinx_dpdma_sw_desc *prev,
507 dma_addr_t dma_addr[],
508 unsigned int num_src_addr)
510 struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
513 hw_desc->src_addr = lower_32_bits(dma_addr[0]);
516 FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
517 upper_32_bits(dma_addr[0]));
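/*
 * A descriptor can carry up to five page addresses (src_addr, src_addr2 to
 * src_addr5). For the additional pages, the 32 LSBs go to the consecutive
 * src_addrN fields, while the 16-bit address extensions are packed two per
 * word: addr_ext_23 holds the MSBs of pages 2 and 3 (low halfword first) and
 * addr_ext_45 those of pages 4 and 5.
 */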
519 for (i = 1; i < num_src_addr; i++) {
520 u32 *addr = &hw_desc->src_addr2;
522 addr[i - 1] = lower_32_bits(dma_addr[i]);
524 if (xdev->ext_addr) {
525 u32 *addr_ext = &hw_desc->addr_ext_23;
528 addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
529 addr_msb <<= 16 * ((i - 1) % 2);
530 addr_ext[(i - 1) / 2] |= addr_msb;
537 prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
540 FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
541 upper_32_bits(sw_desc->dma_addr));
545 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
546 * @chan: DPDMA channel
548 * Allocate a software descriptor from the channel's descriptor pool.
550 * Return: a software descriptor or NULL.
552 static struct xilinx_dpdma_sw_desc *
553 xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
555 struct xilinx_dpdma_sw_desc *sw_desc;
558 sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
562 sw_desc->dma_addr = dma_addr;
568 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
569 * @chan: DPDMA channel
570 * @sw_desc: software descriptor to free
572 * Free a software descriptor from the channel's descriptor pool.
575 xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
576 struct xilinx_dpdma_sw_desc *sw_desc)
578 dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
582 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
583 * @chan: DPDMA channel
584 * @tx_desc: tx descriptor to dump
586 * Dump contents of a tx descriptor
588 static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
589 struct xilinx_dpdma_tx_desc *tx_desc)
591 struct xilinx_dpdma_sw_desc *sw_desc;
592 struct device *dev = chan->xdev->dev;
595 dev_dbg(dev, "------- TX descriptor dump start -------\n");
596 dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
598 list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
599 struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
601 dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
602 dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
603 dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
604 dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
605 dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
606 dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
607 dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
608 dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
609 dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
610 dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
611 dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
612 dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
613 dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
614 dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
615 dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
616 dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
617 dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
618 dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
621 dev_dbg(dev, "------- TX descriptor dump end -------\n");
625 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
626 * @chan: DPDMA channel
628 * Allocate a tx descriptor.
630 * Return: a tx descriptor or NULL.
632 static struct xilinx_dpdma_tx_desc *
633 xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
635 struct xilinx_dpdma_tx_desc *tx_desc;
637 tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
641 INIT_LIST_HEAD(&tx_desc->descriptors);
642 tx_desc->chan = chan;
643 tx_desc->error = false;
649 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
650 * @vdesc: virtual DMA descriptor
652 * Free the virtual DMA descriptor @vdesc including its software descriptors.
654 static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
656 struct xilinx_dpdma_sw_desc *sw_desc, *next;
657 struct xilinx_dpdma_tx_desc *desc;
662 desc = to_dpdma_tx_desc(vdesc);
664 list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
665 list_del(&sw_desc->node);
666 xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
673 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
675 * @chan: DPDMA channel
676 * @xt: dma interleaved template
678 * Prepare a tx descriptor including internal software/hardware descriptors for the DMA transfer specified in @xt.
681 * Return: A DPDMA TX descriptor on success, or NULL.
683 static struct xilinx_dpdma_tx_desc *
684 xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
685 struct dma_interleaved_template *xt)
687 struct xilinx_dpdma_tx_desc *tx_desc;
688 struct xilinx_dpdma_sw_desc *sw_desc;
689 struct xilinx_dpdma_hw_desc *hw_desc;
690 size_t hsize = xt->sgl[0].size;
691 size_t stride = hsize + xt->sgl[0].icg;
693 if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
694 dev_err(chan->xdev->dev,
695 "chan%u: buffer should be aligned at %d B\n",
696 chan->id, XILINX_DPDMA_ALIGN_BYTES);
700 tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
704 sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
706 xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
710 xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
713 hw_desc = &sw_desc->hw;
714 hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
715 hw_desc->xfer_size = hsize * xt->numf;
716 hw_desc->hsize_stride =
717 FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
718 FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
720 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
721 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
722 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
723 hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
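/*
 * A single self-referencing software descriptor describes the whole frame:
 * xilinx_dpdma_sw_desc_set_dma_addrs() above was called with @prev equal to
 * @sw_desc, so next_desc points back to the descriptor itself and the
 * hardware keeps replaying the frame, matching the DMA_PREP_REPEAT semantics
 * required by xilinx_dpdma_prep_interleaved_dma().
 */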
725 list_add_tail(&sw_desc->node, &tx_desc->descriptors);
730 /* -----------------------------------------------------------------------------
731 * DPDMA Channel Operations
735 * xilinx_dpdma_chan_enable - Enable the channel
736 * @chan: DPDMA channel
738 * Enable the channel and its interrupts. Set the QoS values for video class.
740 static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
744 reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
745 | XILINX_DPDMA_INTR_GLOBAL_MASK;
746 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
747 reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
748 | XILINX_DPDMA_INTR_GLOBAL_ERR;
749 dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
751 reg = XILINX_DPDMA_CH_CNTL_ENABLE
752 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
753 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
754 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
755 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
756 | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
757 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
758 dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
762 * xilinx_dpdma_chan_disable - Disable the channel
763 * @chan: DPDMA channel
765 * Disable the channel and its interrupts.
767 static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
771 reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
772 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
773 reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
774 dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
776 dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
780 * xilinx_dpdma_chan_pause - Pause the channel
781 * @chan: DPDMA channel
785 static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
787 dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
791 * xilinx_dpdma_chan_unpause - Unpause the channel
792 * @chan: DPDMA channel
794 * Unpause the channel.
796 static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
798 dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
801 static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
803 struct xilinx_dpdma_device *xdev = chan->xdev;
807 for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
808 if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
811 if (xdev->chan[i]->video_group)
819 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
820 * @chan: DPDMA channel
822 * Queue the next descriptor, if any, to the hardware. If the channel is
823 * stopped, start it first. Otherwise retrigger it with the next descriptor.
825 static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
827 struct xilinx_dpdma_device *xdev = chan->xdev;
828 struct xilinx_dpdma_sw_desc *sw_desc;
829 struct xilinx_dpdma_tx_desc *desc;
830 struct virt_dma_desc *vdesc;
834 lockdep_assert_held(&chan->lock);
836 if (chan->desc.pending)
839 if (!chan->running) {
840 xilinx_dpdma_chan_unpause(chan);
841 xilinx_dpdma_chan_enable(chan);
842 chan->first_frame = true;
843 chan->running = true;
846 vdesc = vchan_next_desc(&chan->vchan);
850 desc = to_dpdma_tx_desc(vdesc);
851 chan->desc.pending = desc;
852 list_del(&desc->vdesc.node);
855 * Assign the cookie to descriptors in this transaction. Only the lower 16
856 * bits will be used, but that should be enough.
858 list_for_each_entry(sw_desc, &desc->descriptors, node)
859 sw_desc->hw.desc_id = desc->vdesc.tx.cookie
860 & XILINX_DPDMA_CH_DESC_ID_MASK;
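/*
 * The truncated cookie stored in desc_id is later read back from the
 * XILINX_DPDMA_CH_DESC_ID register by xilinx_dpdma_chan_vsync_irq() to check
 * whether the hardware has actually switched to this descriptor.
 */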
862 sw_desc = list_first_entry(&desc->descriptors,
863 struct xilinx_dpdma_sw_desc, node);
864 dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
865 lower_32_bits(sw_desc->dma_addr));
867 dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
868 FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
869 upper_32_bits(sw_desc->dma_addr)));
871 first_frame = chan->first_frame;
872 chan->first_frame = false;
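/*
 * The first frame after enabling the channel must be started with the GBL
 * trigger bits; subsequent frames use the retrigger bits so that the hardware
 * switches to the newly queued descriptor, presumably at the next VSYNC (see
 * xilinx_dpdma_chan_vsync_irq()).
 */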
874 if (chan->video_group) {
875 channels = xilinx_dpdma_chan_video_group_ready(chan);
877 * Trigger the transfer only when all channels in the group are ready.
883 channels = BIT(chan->id);
887 reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
889 reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
891 dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
895 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
896 * @chan: DPDMA channel
898 * Read and return the number of outstanding transactions from the channel status register.
900 * Return: Number of outstanding transactions from the status register.
902 static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
904 return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
905 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
909 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
910 * @chan: DPDMA channel
912 * Notify waiters for no outstanding event, so waiters can stop the channel
913 * safely. This function is supposed to be called when 'no outstanding'
914 * interrupt is generated. The 'no outstanding' interrupt is disabled and
915 * should be re-enabled when this event is handled. If the channel status
916 * register still shows some number of outstanding transactions, the interrupt remains enabled.
919 * Return: 0 on success. On failure, -EWOULDBLOCK if there are still outstanding transactions in the queue.
922 static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
926 cnt = xilinx_dpdma_chan_ostand(chan);
928 dev_dbg(chan->xdev->dev,
929 "chan%u: %d outstanding transactions\n",
934 /* Disable 'no outstanding' interrupt */
935 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
936 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
937 wake_up(&chan->wait_to_stop);
943 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
944 * @chan: DPDMA channel
946 * Wait for the no outstanding transaction interrupt. This function can sleep for up to 50 ms.
949 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
950 * from wait_event_interruptible_timeout().
952 static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
956 /* Wait for the no outstanding transaction interrupt for up to 50 ms */
957 ret = wait_event_interruptible_timeout(chan->wait_to_stop,
958 !xilinx_dpdma_chan_ostand(chan),
959 msecs_to_jiffies(50));
961 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
962 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
966 dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
967 chan->id, xilinx_dpdma_chan_ostand(chan));
976 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
977 * @chan: DPDMA channel
979 * Poll the outstanding transaction status, and return when there's no
980 * outstanding transaction. This function can be used in interrupt context
981 * or where atomicity is required. The calling thread may wait for more than 50 ms.
983 * Return: 0 on success, or -ETIMEDOUT.
985 static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
987 u32 cnt, loop = 50000;
989 /* Poll at least for 50ms (20 fps). */
991 cnt = xilinx_dpdma_chan_ostand(chan);
993 } while (loop-- > 0 && cnt);
996 dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
997 XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
1001 dev_err(chan->xdev->dev, "chan%u: not ready to stop: %d trans\n",
1002 chan->id, xilinx_dpdma_chan_ostand(chan));
1008 * xilinx_dpdma_chan_stop - Stop the channel
1009 * @chan: DPDMA channel
1011 * Stop a previously paused channel by first waiting for completion of all
1012 * outstanding transactions and then disabling the channel.
1014 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
1016 static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
1018 unsigned long flags;
1021 ret = xilinx_dpdma_chan_wait_no_ostand(chan);
1025 spin_lock_irqsave(&chan->lock, flags);
1026 xilinx_dpdma_chan_disable(chan);
1027 chan->running = false;
1028 spin_unlock_irqrestore(&chan->lock, flags);
1034 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
1035 * @chan: DPDMA channel
1037 * Handle completion of the currently active descriptor (@chan->desc.active). As
1038 * we currently support cyclic transfers only, this just invokes the cyclic
1039 * callback. The descriptor will be completed at the VSYNC interrupt when a new
1040 * descriptor replaces it.
1042 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
1044 struct xilinx_dpdma_tx_desc *active;
1045 unsigned long flags;
1047 spin_lock_irqsave(&chan->lock, flags);
1049 xilinx_dpdma_debugfs_desc_done_irq(chan);
1051 active = chan->desc.active;
1053 vchan_cyclic_callback(&active->vdesc);
1055 dev_warn(chan->xdev->dev,
1056 "chan%u: DONE IRQ with no active descriptor!\n",
1059 spin_unlock_irqrestore(&chan->lock, flags);
1063 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
1064 * @chan: DPDMA channel
1066 * At VSYNC the active descriptor may have been replaced by the pending
1067 * descriptor. Detect this through the DESC_ID and perform the appropriate bookkeeping.
1070 static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
1072 struct xilinx_dpdma_tx_desc *pending;
1073 struct xilinx_dpdma_sw_desc *sw_desc;
1074 unsigned long flags;
1077 spin_lock_irqsave(&chan->lock, flags);
1079 pending = chan->desc.pending;
1080 if (!chan->running || !pending)
1083 desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
1084 & XILINX_DPDMA_CH_DESC_ID_MASK;
1086 /* If the retrigger raced with vsync, retry at the next frame. */
1087 sw_desc = list_first_entry(&pending->descriptors,
1088 struct xilinx_dpdma_sw_desc, node);
1089 if (sw_desc->hw.desc_id != desc_id) {
1090 dev_dbg(chan->xdev->dev,
1091 "chan%u: vsync race lost (%u != %u), retrying\n",
1092 chan->id, sw_desc->hw.desc_id, desc_id);
1097 * Complete the active descriptor, if any, promote the pending
1098 * descriptor to active, and queue the next transfer, if any.
1100 if (chan->desc.active)
1101 vchan_cookie_complete(&chan->desc.active->vdesc);
1102 chan->desc.active = pending;
1103 chan->desc.pending = NULL;
1105 xilinx_dpdma_chan_queue_transfer(chan);
1108 spin_unlock_irqrestore(&chan->lock, flags);
1112 * xilinx_dpdma_chan_err - Detect any channel error
1113 * @chan: DPDMA channel
1114 * @isr: masked Interrupt Status Register
1115 * @eisr: Error Interrupt Status Register
1117 * Return: true if any channel error occurs, or false otherwise.
1120 xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
1125 if (chan->running &&
1126 ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
1127 (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
1134 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
1135 * @chan: DPDMA channel
1137 * This function is called when any channel error or any global error occurs.
1138 * The function disables the channel that was paused due to the error, and
1139 * determines whether the currently active descriptor can be rescheduled,
1140 * depending on the descriptor status.
1142 static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
1144 struct xilinx_dpdma_device *xdev = chan->xdev;
1145 struct xilinx_dpdma_tx_desc *active;
1146 unsigned long flags;
1148 spin_lock_irqsave(&chan->lock, flags);
1150 dev_dbg(xdev->dev, "chan%u: cur desc addr = 0x%04x%08x\n",
1152 dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
1153 dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
1154 dev_dbg(xdev->dev, "chan%u: cur payload addr = 0x%04x%08x\n",
1156 dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
1157 dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
1159 xilinx_dpdma_chan_disable(chan);
1160 chan->running = false;
1162 if (!chan->desc.active)
1165 active = chan->desc.active;
1166 chan->desc.active = NULL;
1168 xilinx_dpdma_chan_dump_tx_desc(chan, active);
1171 dev_dbg(xdev->dev, "chan%u: repeated error on desc\n",
1174 /* Reschedule if there's no new descriptor */
1175 if (!chan->desc.pending &&
1176 list_empty(&chan->vchan.desc_issued)) {
1177 active->error = true;
1178 list_add_tail(&active->vdesc.node,
1179 &chan->vchan.desc_issued);
1181 xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
1185 spin_unlock_irqrestore(&chan->lock, flags);
1188 /* -----------------------------------------------------------------------------
1189 * DMA Engine Operations
1192 static struct dma_async_tx_descriptor *
1193 xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
1194 struct dma_interleaved_template *xt,
1195 unsigned long flags)
1197 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1198 struct xilinx_dpdma_tx_desc *desc;
1200 if (xt->dir != DMA_MEM_TO_DEV)
1203 if (!xt->numf || !xt->sgl[0].size)
1206 if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
1209 desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
1213 vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);
1215 return &desc->vdesc.tx;
1219 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
1220 * @dchan: DMA channel
1222 * Allocate a descriptor pool for the channel.
1224 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
1226 static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
1228 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1229 size_t align = __alignof__(struct xilinx_dpdma_sw_desc);
1231 chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
1233 sizeof(struct xilinx_dpdma_sw_desc),
1235 if (!chan->desc_pool) {
1236 dev_err(chan->xdev->dev,
1237 "chan%u: failed to allocate a descriptor pool\n",
1246 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
1247 * @dchan: DMA channel
1249 * Free resources associated with the virtual DMA channel, and destroy the
1252 static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
1254 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1256 vchan_free_chan_resources(&chan->vchan);
1258 dma_pool_destroy(chan->desc_pool);
1259 chan->desc_pool = NULL;
1262 static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
1264 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1265 unsigned long flags;
1267 spin_lock_irqsave(&chan->vchan.lock, flags);
1268 if (vchan_issue_pending(&chan->vchan))
1269 xilinx_dpdma_chan_queue_transfer(chan);
1270 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1273 static int xilinx_dpdma_config(struct dma_chan *dchan,
1274 struct dma_slave_config *config)
1276 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1277 struct xilinx_dpdma_peripheral_config *pconfig;
1278 unsigned long flags;
1281 * The destination address doesn't need to be specified as the DPDMA is
1282 * hardwired to the destination (the DP controller). The transfer
1283 * width, burst size and port window size are thus meaningless; they're
1284 * fixed both on the DPDMA side and on the DP controller side.
1288 * Use the peripheral_config to indicate that the channel is part
1289 * of a video group. This requires matching use of the custom
1290 * structure in each driver.
1292 pconfig = config->peripheral_config;
1293 if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
1296 spin_lock_irqsave(&chan->lock, flags);
1297 if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
1298 chan->video_group = pconfig->video_group;
1299 spin_unlock_irqrestore(&chan->lock, flags);
1304 static int xilinx_dpdma_pause(struct dma_chan *dchan)
1306 xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
1311 static int xilinx_dpdma_resume(struct dma_chan *dchan)
1313 xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
1319 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
1320 * @dchan: DMA channel
1322 * Pause the channel without waiting for ongoing transfers to complete. Waiting
1323 * for completion is performed by xilinx_dpdma_synchronize() that will disable
1324 * the channel to complete the stop.
1326 * All the descriptors associated with the channel that are guaranteed not to
1327 * be touched by the hardware are freed. The pending and active descriptors are not
1328 * touched, and will be freed either upon completion, or by
1329 * xilinx_dpdma_synchronize().
1331 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
1333 static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
1335 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1336 struct xilinx_dpdma_device *xdev = chan->xdev;
1337 LIST_HEAD(descriptors);
1338 unsigned long flags;
1341 /* Pause the channel (including the whole video group if applicable). */
1342 if (chan->video_group) {
1343 for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
1344 if (xdev->chan[i]->video_group &&
1345 xdev->chan[i]->running) {
1346 xilinx_dpdma_chan_pause(xdev->chan[i]);
1347 xdev->chan[i]->video_group = false;
1351 xilinx_dpdma_chan_pause(chan);
1354 /* Gather all the descriptors we can free and free them. */
1355 spin_lock_irqsave(&chan->vchan.lock, flags);
1356 vchan_get_all_descriptors(&chan->vchan, &descriptors);
1357 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1359 vchan_dma_desc_free_list(&chan->vchan, &descriptors);
1365 * xilinx_dpdma_synchronize - Synchronize callback execution
1366 * @dchan: DMA channel
1368 * Synchronizing callback execution ensures that all previously issued
1369 * transfers have completed and all associated callbacks have been called and have returned.
1372 * This function waits for the DMA channel to stop. It assumes it has been
1373 * paused by a previous call to dmaengine_terminate_async(), and that no new
1374 * pending descriptors have been issued with dma_async_issue_pending(). The
1375 * behaviour is undefined otherwise.
1377 static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
1379 struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1380 unsigned long flags;
1382 xilinx_dpdma_chan_stop(chan);
1384 spin_lock_irqsave(&chan->vchan.lock, flags);
1385 if (chan->desc.pending) {
1386 vchan_terminate_vdesc(&chan->desc.pending->vdesc);
1387 chan->desc.pending = NULL;
1389 if (chan->desc.active) {
1390 vchan_terminate_vdesc(&chan->desc.active->vdesc);
1391 chan->desc.active = NULL;
1393 spin_unlock_irqrestore(&chan->vchan.lock, flags);
1395 vchan_synchronize(&chan->vchan);
1398 /* -----------------------------------------------------------------------------
1399 * Interrupt and Tasklet Handling
1403 * xilinx_dpdma_err - Detect any global error
1404 * @isr: Interrupt Status Register
1405 * @eisr: Error Interrupt Status Register
1407 * Return: True if any global error occurs, or false otherwise.
1409 static bool xilinx_dpdma_err(u32 isr, u32 eisr)
1411 if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
1412 eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
1419 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
1420 * @xdev: DPDMA device
1421 * @isr: masked Interrupt Status Register
1422 * @eisr: Error Interrupt Status Register
1424 * Handle any error that occurred, based on @isr and @eisr. This function disables
1425 * the corresponding error interrupts; they should be re-enabled once handling is done.
1428 static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
1431 bool err = xilinx_dpdma_err(isr, eisr);
1434 dev_dbg_ratelimited(xdev->dev,
1435 "error irq: isr = 0x%08x, eisr = 0x%08x\n",
1438 /* Disable channel error interrupts until errors are handled. */
1439 dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
1440 isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
1441 dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
1442 eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
1444 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1445 if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
1446 tasklet_schedule(&xdev->chan[i]->err_task);
1450 * xilinx_dpdma_enable_irq - Enable interrupts
1451 * @xdev: DPDMA device
1453 * Enable interrupts.
1455 static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
1457 dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
1458 dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
1462 * xilinx_dpdma_disable_irq - Disable interrupts
1463 * @xdev: DPDMA device
1465 * Disable interrupts.
1467 static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
1469 dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
1470 dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
1474 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
1475 * @t: pointer to the tasklet associated with this handler
1477 * Per channel error handling tasklet. This function waits for the outstanding
1478 * transactions to complete and triggers error handling. After error handling, it
1479 * re-enables the channel error interrupts and restarts the channel if needed.
1481 static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
1483 struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
1484 struct xilinx_dpdma_device *xdev = chan->xdev;
1485 unsigned long flags;
1487 /* Proceed with error handling even if polling fails. */
1488 xilinx_dpdma_chan_poll_no_ostand(chan);
1490 xilinx_dpdma_chan_handle_err(chan);
1492 dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
1493 XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
1494 dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
1495 XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
1497 spin_lock_irqsave(&chan->lock, flags);
1498 xilinx_dpdma_chan_queue_transfer(chan);
1499 spin_unlock_irqrestore(&chan->lock, flags);
1502 static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
1504 struct xilinx_dpdma_device *xdev = data;
1510 status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
1511 error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
1512 if (!status && !error)
1515 dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
1516 dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
1518 if (status & XILINX_DPDMA_INTR_VSYNC) {
1520 * There's a single VSYNC interrupt that needs to be processed
1521 * by each running channel to update the active descriptor.
1523 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
1524 struct xilinx_dpdma_chan *chan = xdev->chan[i];
1527 xilinx_dpdma_chan_vsync_irq(chan);
1531 mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
1533 for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1534 xilinx_dpdma_chan_done_irq(xdev->chan[i]);
1537 mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
1539 for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
1540 xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
1543 mask = status & XILINX_DPDMA_INTR_ERR_ALL;
1545 xilinx_dpdma_handle_err_irq(xdev, mask, error);
1550 /* -----------------------------------------------------------------------------
1551 * Initialization & Cleanup
1554 static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
1555 unsigned int chan_id)
1557 struct xilinx_dpdma_chan *chan;
1559 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
1564 chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
1565 + XILINX_DPDMA_CH_OFFSET * chan->id;
1566 chan->running = false;
1569 spin_lock_init(&chan->lock);
1570 init_waitqueue_head(&chan->wait_to_stop);
1572 tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);
1574 chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
1575 vchan_init(&chan->vchan, &xdev->common);
1577 xdev->chan[chan->id] = chan;
1582 static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
1587 tasklet_kill(&chan->err_task);
1588 list_del(&chan->vchan.chan.device_node);
1591 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1592 struct of_dma *ofdma)
1594 struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
1595 u32 chan_id = dma_spec->args[0];
1597 if (chan_id >= ARRAY_SIZE(xdev->chan))
1600 if (!xdev->chan[chan_id])
1603 return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
1606 static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
1611 /* Disable all interrupts */
1612 xilinx_dpdma_disable_irq(xdev);
1614 /* Stop all channels */
1615 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
1616 reg = xdev->reg + XILINX_DPDMA_CH_BASE
1617 + XILINX_DPDMA_CH_OFFSET * i;
1618 dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
1621 /* Clear the interrupt status registers */
1622 dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
1623 dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
1626 static int xilinx_dpdma_probe(struct platform_device *pdev)
1628 struct xilinx_dpdma_device *xdev;
1629 struct dma_device *ddev;
1633 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1637 xdev->dev = &pdev->dev;
1638 xdev->ext_addr = sizeof(dma_addr_t) > 4;
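/*
 * With a dma_addr_t wider than 32 bits, the 16-bit address extension fields
 * of the hardware descriptors are used, giving 48-bit addressing (see
 * xilinx_dpdma_sw_desc_set_dma_addrs()).
 */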
1640 INIT_LIST_HEAD(&xdev->common.channels);
1642 platform_set_drvdata(pdev, xdev);
1644 xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
1645 if (IS_ERR(xdev->axi_clk))
1646 return PTR_ERR(xdev->axi_clk);
1648 xdev->reg = devm_platform_ioremap_resource(pdev, 0);
1649 if (IS_ERR(xdev->reg))
1650 return PTR_ERR(xdev->reg);
1652 dpdma_hw_init(xdev);
1654 xdev->irq = platform_get_irq(pdev, 0);
1658 ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
1659 dev_name(xdev->dev), xdev);
1661 dev_err(xdev->dev, "failed to request IRQ\n");
1665 ddev = &xdev->common;
1666 ddev->dev = &pdev->dev;
1668 dma_cap_set(DMA_SLAVE, ddev->cap_mask);
1669 dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
1670 dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
1671 dma_cap_set(DMA_REPEAT, ddev->cap_mask);
1672 dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
1673 ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
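/*
 * The dmaengine alignment is expressed as a power-of-two exponent:
 * fls(256 - 1) = 8, i.e. 2^8 = 256-byte alignment, matching
 * XILINX_DPDMA_ALIGN_BYTES.
 */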
1675 ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
1676 ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
1677 ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
1678 /* TODO: Can we achieve better granularity? */
1679 ddev->device_tx_status = dma_cookie_status;
1680 ddev->device_issue_pending = xilinx_dpdma_issue_pending;
1681 ddev->device_config = xilinx_dpdma_config;
1682 ddev->device_pause = xilinx_dpdma_pause;
1683 ddev->device_resume = xilinx_dpdma_resume;
1684 ddev->device_terminate_all = xilinx_dpdma_terminate_all;
1685 ddev->device_synchronize = xilinx_dpdma_synchronize;
1686 ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
1687 ddev->directions = BIT(DMA_MEM_TO_DEV);
1688 ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1690 for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
1691 ret = xilinx_dpdma_chan_init(xdev, i);
1693 dev_err(xdev->dev, "failed to initialize channel %u\n",
1699 ret = clk_prepare_enable(xdev->axi_clk);
1701 dev_err(xdev->dev, "failed to enable the axi clock\n");
1705 ret = dma_async_device_register(ddev);
1707 dev_err(xdev->dev, "failed to register the dma device\n");
1708 goto error_dma_async;
1711 ret = of_dma_controller_register(xdev->dev->of_node,
1712 of_dma_xilinx_xlate, ddev);
1714 dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
1718 xilinx_dpdma_enable_irq(xdev);
1720 xilinx_dpdma_debugfs_init(xdev);
1722 dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");
1727 dma_async_device_unregister(ddev);
1729 clk_disable_unprepare(xdev->axi_clk);
1731 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1732 xilinx_dpdma_chan_remove(xdev->chan[i]);
1734 free_irq(xdev->irq, xdev);
1739 static int xilinx_dpdma_remove(struct platform_device *pdev)
1741 struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
1744 /* Start by disabling the IRQ to avoid races during cleanup. */
1745 free_irq(xdev->irq, xdev);
1747 xilinx_dpdma_disable_irq(xdev);
1748 of_dma_controller_free(pdev->dev.of_node);
1749 dma_async_device_unregister(&xdev->common);
1750 clk_disable_unprepare(xdev->axi_clk);
1752 for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
1753 xilinx_dpdma_chan_remove(xdev->chan[i]);
1758 static const struct of_device_id xilinx_dpdma_of_match[] = {
1759 { .compatible = "xlnx,zynqmp-dpdma",},
1760 { /* end of table */ },
1762 MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
1764 static struct platform_driver xilinx_dpdma_driver = {
1765 .probe = xilinx_dpdma_probe,
1766 .remove = xilinx_dpdma_remove,
1768 .name = "xilinx-zynqmp-dpdma",
1769 .of_match_table = xilinx_dpdma_of_match,
1773 module_platform_driver(xilinx_dpdma_driver);
1775 MODULE_AUTHOR("Xilinx, Inc.");
1776 MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
1777 MODULE_LICENSE("GPL v2");