1 | /* |
2 | * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems) | |
3 | * | |
4 | * Copyright (C) 2014 Atmel Corporation | |
5 | * | |
6 | * Author: Ludovic Desroches <[email protected]> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License version 2 as published by | |
10 | * the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public License along with | |
18 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
19 | */ | |
20 | ||
21 | #include <asm/barrier.h> | |
22 | #include <dt-bindings/dma/at91.h> | |
23 | #include <linux/clk.h> | |
24 | #include <linux/dmaengine.h> | |
25 | #include <linux/dmapool.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/irq.h> | |
28 | #include <linux/kernel.h> |
29 | #include <linux/list.h> |
30 | #include <linux/module.h> | |
31 | #include <linux/of_dma.h> | |
32 | #include <linux/of_platform.h> | |
33 | #include <linux/platform_device.h> | |
34 | #include <linux/pm.h> | |
35 | ||
36 | #include "dmaengine.h" | |
37 | ||
38 | /* Global registers */ | |
39 | #define AT_XDMAC_GTYPE 0x00 /* Global Type Register */ | |
40 | #define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */ | |
41 | #define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */ | |
42 | #define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */ | |
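/*
 * Illustrative decode (hypothetical GTYPE value): 0x3F400F would report
 * (0xF + 1) = 16 channels, a ((0x3F400F >> 5) & 0x7FF) = 512-byte FIFO and
 * (0x3F + 1) = 64 peripheral requests.
 */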
43 | #define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */ | |
44 | #define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */ | |
45 | #define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */ | |
46 | #define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */ | |
47 | #define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */ | |
48 | #define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */ | |
49 | #define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */ | |
50 | #define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */ | |
51 | #define AT_XDMAC_GS 0x24 /* Global Channel Status Register */ | |
52 | #define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */ | |
53 | #define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */ | |
54 | #define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */ | |
55 | #define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */ | |
56 | #define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */ | |
57 | #define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */ | |
58 | #define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */ | |
59 | #define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */ | |
60 | ||
61 | /* Channel relative registers offsets */ | |
62 | #define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */ | |
63 | #define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */ | |
64 | #define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */ | |
65 | #define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */ | |
66 | #define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */ | |
67 | #define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */ | |
68 | #define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */ | |
69 | #define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */ | |
70 | #define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */ | |
71 | #define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */ | |
72 | #define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */ | |
73 | #define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */ | |
74 | #define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */ | |
75 | #define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */ | |
76 | #define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */ | |
77 | #define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */ | |
78 | #define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */ | |
79 | #define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */ | |
80 | #define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */ | |
81 | #define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */ | |
82 | #define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */ | |
83 | #define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */ | |
84 | #define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */ | |
85 | #define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */ | |
86 | #define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */ | |
87 | #define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */ | |
88 | #define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */ | |
89 | #define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */ | |
90 | #define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */ | |
91 | #define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */ | |
92 | #define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */ | |
93 | #define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */ | |
94 | #define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */ | |
95 | #define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */ | |
96 | #define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */ | |
97 | #define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */ | |
98 | #define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */ | |
99 | #define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */ | |
100 | #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ | |
101 | #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ | |
102 | #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ | |
103 | #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ | |
104 | #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ | |
105 | #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ | |
106 | #define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */ | |
107 | #define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */ | |
108 | #define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */ | |
109 | #define AT_XDMAC_CC 0x28 /* Channel Configuration Register */ | |
110 | #define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */ | |
111 | #define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */ | |
112 | #define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */ | |
113 | #define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1) | |
114 | #define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1) | |
115 | #define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1) | |
116 | #define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1) | |
117 | #define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1) | |
118 | #define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */ | |
119 | #define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4) | |
120 | #define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4) | |
121 | #define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */ | |
122 | #define AT_XDMAC_CC_PROT_SEC (0x0 << 5) | |
123 | #define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5) | |
124 | #define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */ | |
125 | #define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6) | |
126 | #define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6) | |
127 | #define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */ | |
128 | #define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7) | |
129 | #define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7) | |
130 | #define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */ | |
131 | #define AT_XDMAC_CC_DWIDTH_OFFSET 11 | |
132 | #define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET) | |
133 | #define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */ | |
134 | #define AT_XDMAC_CC_DWIDTH_BYTE 0x0 | |
135 | #define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1 | |
136 | #define AT_XDMAC_CC_DWIDTH_WORD 0x2 | |
137 | #define AT_XDMAC_CC_DWIDTH_DWORD 0x3 | |
138 | #define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */ | |
139 | #define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */ | |
140 | #define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */ | |
141 | #define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16) | |
142 | #define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16) | |
143 | #define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16) | |
144 | #define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16) | |
145 | #define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */ |
146 | #define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18) | |
147 | #define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18) | |
148 | #define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18) | |
149 | #define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18) | |
150 | #define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */ | |
151 | #define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21) | |
152 | #define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21) | |
153 | #define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */ | |
154 | #define AT_XDMAC_CC_RDIP_DONE (0x0 << 22) | |
155 | #define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22) | |
156 | #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ | |
157 | #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) | |
158 | #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) | |
159 | #define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */ |
160 | #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ |
161 | #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ | |
162 | #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ | |
163 | ||
164 | #define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */ | |
165 | ||
166 | /* Microblock control members */ | |
167 | #define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */ | |
168 | #define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */ | |
169 | #define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */ | |
170 | #define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */ | |
171 | #define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */ | |
172 | #define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */ | |
173 | #define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */ | |
174 | #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ | |
175 | ||
176 | #define AT_XDMAC_MAX_CHAN 0x20 | |
177 | #define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ |
178 | #define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ | |
179 | #define AT_XDMAC_RESIDUE_MAX_RETRIES 5 |
180 |
181 | #define AT_XDMAC_DMA_BUSWIDTHS\ |
182 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ | |
183 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ | |
184 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ | |
185 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\ | |
186 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) | |
187 | ||
188 | enum atc_status { |
189 | AT_XDMAC_CHAN_IS_CYCLIC = 0, | |
190 | AT_XDMAC_CHAN_IS_PAUSED, | |
191 | }; | |
192 | ||
193 | /* ----- Channels ----- */ | |
194 | struct at_xdmac_chan { | |
195 | struct dma_chan chan; | |
196 | void __iomem *ch_regs; | |
197 | u32 mask; /* Channel Mask */ | |
198 | u32 cfg; /* Channel Configuration Register */ |
199 | u8 perid; /* Peripheral ID */ |
200 | u8 perif; /* Peripheral Interface */ | |
201 | u8 memif; /* Memory Interface */ | |
202 | u32 save_cc; |
203 | u32 save_cim; |
204 | u32 save_cnda; | |
205 | u32 save_cndc; | |
206 | unsigned long status; | |
207 | struct tasklet_struct tasklet; | |
208 | struct dma_slave_config sconfig; |
209 | |
210 | spinlock_t lock; | |
211 | ||
212 | struct list_head xfers_list; | |
213 | struct list_head free_descs_list; | |
214 | }; | |
215 | ||
216 | ||
217 | /* ----- Controller ----- */ | |
218 | struct at_xdmac { | |
219 | struct dma_device dma; | |
220 | void __iomem *regs; | |
221 | int irq; | |
222 | struct clk *clk; | |
223 | u32 save_gim; | |
224 | u32 save_gs; | |
225 | struct dma_pool *at_xdmac_desc_pool; | |
226 | struct at_xdmac_chan chan[0]; | |
227 | }; | |
228 | ||
229 | ||
230 | /* ----- Descriptors ----- */ | |
231 | ||
232 | /* Linked List Descriptor */ | |
233 | struct at_xdmac_lld { | |
234 | dma_addr_t mbr_nda; /* Next Descriptor Member */ | |
235 | u32 mbr_ubc; /* Microblock Control Member */ | |
236 | dma_addr_t mbr_sa; /* Source Address Member */ | |
237 | dma_addr_t mbr_da; /* Destination Address Member */ | |
238 | u32 mbr_cfg; /* Configuration Register */ | |
239 | u32 mbr_bc; /* Block Control Register */ |
240 | u32 mbr_ds; /* Data Stride Register */ | |
241 | u32 mbr_sus; /* Source Microblock Stride Register */ | |
242 | u32 mbr_dus; /* Destination Microblock Stride Register */ | |
243 | }; |
244 | ||
245 | /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ |
246 | struct at_xdmac_desc { |
247 | struct at_xdmac_lld lld; | |
248 | enum dma_transfer_direction direction; | |
249 | struct dma_async_tx_descriptor tx_dma_desc; | |
250 | struct list_head desc_node; | |
251 | /* Following members are only used by the first descriptor */ | |
252 | bool active_xfer; | |
253 | unsigned int xfer_size; | |
254 | struct list_head descs_list; | |
255 | struct list_head xfer_node; | |
256 | } __aligned(sizeof(u64)); |
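/*
 * Note: lld must stay the first member of at_xdmac_desc: the descriptor's
 * physical address (tx_dma_desc.phys) is what gets written to CNDA, and the
 * controller expects the hardware linked-list view to start right there.
 */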
257 | |
258 | static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) | |
259 | { | |
260 | return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40); | |
261 | } | |
262 | ||
263 | #define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg)) |
264 | #define at_xdmac_write(atxdmac, reg, value) \ |
265 | writel_relaxed((value), (atxdmac)->regs + (reg)) |
266 |
267 | #define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg)) |
268 | #define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg)) | |
269 | |
270 | static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan) | |
271 | { | |
272 | return container_of(dchan, struct at_xdmac_chan, chan); | |
273 | } | |
274 | ||
275 | static struct device *chan2dev(struct dma_chan *chan) | |
276 | { | |
277 | return &chan->dev->device; | |
278 | } | |
279 | ||
280 | static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev) | |
281 | { | |
282 | return container_of(ddev, struct at_xdmac, dma); | |
283 | } | |
284 | ||
285 | static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd) | |
286 | { | |
287 | return container_of(txd, struct at_xdmac_desc, tx_dma_desc); | |
288 | } | |
289 | ||
290 | static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan) | |
291 | { | |
292 | return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); | |
293 | } | |
294 | ||
295 | static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan) | |
296 | { | |
297 | return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); | |
298 | } | |
299 | ||
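/*
 * Convert a maxburst value (in data items) into the CSIZE field encoding,
 * i.e. log2 of the chunk size. Bursts larger than 16 data are rejected
 * with -EINVAL.
 */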
300 | static inline int at_xdmac_csize(u32 maxburst) | |
301 | { | |
302 | int csize; | |
303 | ||
304 | csize = ffs(maxburst) - 1; | |
305 | if (csize > 4) | |
306 | csize = -EINVAL; | |
307 | ||
308 | return csize; | |
309 | }; | |
310 | ||
311 | static inline u8 at_xdmac_get_dwidth(u32 cfg) | |
312 | { | |
313 | return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET; | |
314 | }; | |
315 | ||
316 | static unsigned int init_nr_desc_per_channel = 64; | |
317 | module_param(init_nr_desc_per_channel, uint, 0644); | |
318 | MODULE_PARM_DESC(init_nr_desc_per_channel, | |
319 | "initial descriptors per channel (default: 64)"); | |
320 | ||
321 | ||
322 | static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan) | |
323 | { | |
324 | return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask; | |
325 | } | |
326 | ||
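/*
 * Globally disable every channel, busy-wait until the controller reports
 * them all idle, then mask all global interrupts.
 */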
327 | static void at_xdmac_off(struct at_xdmac *atxdmac) | |
328 | { | |
329 | at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L); | |
330 | ||
331 | /* Wait until all channels are disabled. */ |
332 | while (at_xdmac_read(atxdmac, AT_XDMAC_GS)) | |
333 | cpu_relax(); | |
334 | ||
335 | at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L); | |
336 | } | |
337 | ||
338 | /* Call with lock held. */ |
339 | static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, | |
340 | struct at_xdmac_desc *first) | |
341 | { | |
342 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | |
343 | u32 reg; | |
344 | ||
345 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); | |
346 | ||
347 | if (at_xdmac_chan_is_enabled(atchan)) | |
348 | return; | |
349 | ||
350 | /* Mark the transfer as active so we do not try to start it again. */ |
351 | first->active_xfer = true; | |
352 | ||
353 | /* Tell xdmac where to get the first descriptor. */ | |
354 | reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys) | |
355 | | AT_XDMAC_CNDA_NDAIF(atchan->memif); | |
356 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg); | |
357 | ||
358 | /* | |
359 | * When doing a non-cyclic transfer we need to use the next |
360 | * descriptor view 2 since some fields of the configuration register |
361 | * depend on transfer size and src/dest addresses. | |
362 | */ | |
363 | if (at_xdmac_chan_is_cyclic(atchan)) |
364 | reg = AT_XDMAC_CNDC_NDVIEW_NDV1; |
365 | else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) |
366 | reg = AT_XDMAC_CNDC_NDVIEW_NDV3; |
367 | else |
368 | reg = AT_XDMAC_CNDC_NDVIEW_NDV2; |
369 | /* |
370 | * Even if the register will be updated from the configuration in the | |
371 | * descriptor when using view 2 or higher, the PROT bit won't be set | |
372 | * properly. This bit can be modified only by using the channel | |
373 | * configuration register. | |
374 | */ | |
375 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); | |
376 | |
377 | reg |= AT_XDMAC_CNDC_NDDUP | |
378 | | AT_XDMAC_CNDC_NDSUP | |
379 | | AT_XDMAC_CNDC_NDE; | |
380 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg); | |
381 | ||
382 | dev_vdbg(chan2dev(&atchan->chan), | |
383 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | |
384 | __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), | |
385 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | |
386 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | |
387 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | |
388 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | |
389 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | |
390 | ||
391 | at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff); | |
392 | reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE; | |
393 | /* | |
394 | * There is no end of list when doing cyclic DMA, so we need to get |
395 | * an interrupt after each period. |
396 | */ | |
397 | if (at_xdmac_chan_is_cyclic(atchan)) | |
398 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, | |
399 | reg | AT_XDMAC_CIE_BIE); | |
400 | else | |
401 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, | |
402 | reg | AT_XDMAC_CIE_LIE); | |
403 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask); | |
404 | dev_vdbg(chan2dev(&atchan->chan), | |
405 | "%s: enable channel (0x%08x)\n", __func__, atchan->mask); | |
406 | wmb(); | |
407 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); | |
408 | ||
409 | dev_vdbg(chan2dev(&atchan->chan), | |
410 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | |
411 | __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC), | |
412 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | |
413 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | |
414 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | |
415 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | |
416 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | |
417 | ||
418 | } | |
419 | ||
420 | static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) | |
421 | { | |
422 | struct at_xdmac_desc *desc = txd_to_at_desc(tx); | |
423 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); | |
424 | dma_cookie_t cookie; | |
425 | unsigned long irqflags; |
426 |
427 | spin_lock_irqsave(&atchan->lock, irqflags); |
428 | cookie = dma_cookie_assign(tx); |
429 | ||
430 | dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", | |
431 | __func__, atchan, desc); | |
432 | list_add_tail(&desc->xfer_node, &atchan->xfers_list); | |
433 | if (list_is_singular(&atchan->xfers_list)) | |
434 | at_xdmac_start_xfer(atchan, desc); | |
435 | ||
436 | spin_unlock_irqrestore(&atchan->lock, irqflags); |
437 | return cookie; |
438 | } | |
439 | ||
440 | static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, | |
441 | gfp_t gfp_flags) | |
442 | { | |
443 | struct at_xdmac_desc *desc; | |
444 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); | |
445 | dma_addr_t phys; | |
446 | ||
447 | desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); | |
448 | if (desc) { | |
449 | memset(desc, 0, sizeof(*desc)); | |
450 | INIT_LIST_HEAD(&desc->descs_list); | |
451 | dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); | |
452 | desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; | |
453 | desc->tx_dma_desc.phys = phys; | |
454 | } | |
455 | ||
456 | return desc; | |
457 | } | |
458 | ||
459 | static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) |
460 | { |
461 | memset(&desc->lld, 0, sizeof(desc->lld)); | |
462 | INIT_LIST_HEAD(&desc->descs_list); | |
463 | desc->direction = DMA_TRANS_NONE; | |
464 | desc->xfer_size = 0; | |
465 | desc->active_xfer = false; | |
466 | } | |
467 | ||
468 | /* Call must be protected by lock. */ |
469 | static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) | |
470 | { | |
471 | struct at_xdmac_desc *desc; | |
472 | ||
473 | if (list_empty(&atchan->free_descs_list)) { | |
474 | desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); | |
475 | } else { | |
476 | desc = list_first_entry(&atchan->free_descs_list, | |
477 | struct at_xdmac_desc, desc_node); | |
478 | list_del(&desc->desc_node); | |
479 | at_xdmac_init_used_desc(desc); |
480 | } |
481 | ||
482 | return desc; | |
483 | } | |
484 | ||
485 | static void at_xdmac_queue_desc(struct dma_chan *chan, |
486 | struct at_xdmac_desc *prev, | |
487 | struct at_xdmac_desc *desc) | |
488 | { | |
489 | if (!prev || !desc) | |
490 | return; | |
491 | ||
492 | prev->lld.mbr_nda = desc->tx_dma_desc.phys; | |
493 | prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE; | |
494 | ||
495 | dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n", | |
496 | __func__, prev, &prev->lld.mbr_nda); | |
497 | } | |
498 | ||
499 | static inline void at_xdmac_increment_block_count(struct dma_chan *chan, |
500 | struct at_xdmac_desc *desc) | |
501 | { | |
502 | if (!desc) | |
503 | return; | |
504 | ||
505 | desc->lld.mbr_bc++; | |
506 | ||
507 | dev_dbg(chan2dev(chan), | |
508 | "%s: incrementing the block count of the desc 0x%p\n", | |
509 | __func__, desc); | |
510 | } | |
511 | ||
512 | static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, |
513 | struct of_dma *of_dma) | |
514 | { | |
515 | struct at_xdmac *atxdmac = of_dma->of_dma_data; | |
516 | struct at_xdmac_chan *atchan; | |
517 | struct dma_chan *chan; | |
518 | struct device *dev = atxdmac->dma.dev; | |
519 | ||
520 | if (dma_spec->args_count != 1) { | |
521 | dev_err(dev, "dma phandle args: bad number of args\n"); |
522 | return NULL; | |
523 | } | |
524 | ||
525 | chan = dma_get_any_slave_channel(&atxdmac->dma); | |
526 | if (!chan) { | |
527 | dev_err(dev, "can't get a dma channel\n"); | |
528 | return NULL; | |
529 | } | |
530 | ||
531 | atchan = to_at_xdmac_chan(chan); | |
532 | atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]); | |
533 | atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]); | |
534 | atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]); | |
535 | dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n", | |
536 | atchan->memif, atchan->perif, atchan->perid); | |
537 | ||
538 | return chan; | |
539 | } | |
540 | ||
541 | static int at_xdmac_compute_chan_conf(struct dma_chan *chan, |
542 | enum dma_transfer_direction direction) | |
543 | { | |
544 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
545 | int csize, dwidth; | |
546 | ||
547 | if (direction == DMA_DEV_TO_MEM) { | |
548 | atchan->cfg = | |
549 | AT91_XDMAC_DT_PERID(atchan->perid) | |
550 | | AT_XDMAC_CC_DAM_INCREMENTED_AM | |
551 | | AT_XDMAC_CC_SAM_FIXED_AM | |
552 | | AT_XDMAC_CC_DIF(atchan->memif) | |
553 | | AT_XDMAC_CC_SIF(atchan->perif) | |
554 | | AT_XDMAC_CC_SWREQ_HWR_CONNECTED | |
555 | | AT_XDMAC_CC_DSYNC_PER2MEM | |
556 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | |
557 | | AT_XDMAC_CC_TYPE_PER_TRAN; | |
558 | csize = ffs(atchan->sconfig.src_maxburst) - 1; | |
559 | if (csize < 0) { | |
560 | dev_err(chan2dev(chan), "invalid src maxburst value\n"); | |
561 | return -EINVAL; | |
562 | } | |
563 | atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); | |
564 | dwidth = ffs(atchan->sconfig.src_addr_width) - 1; | |
565 | if (dwidth < 0) { | |
566 | dev_err(chan2dev(chan), "invalid src addr width value\n"); | |
567 | return -EINVAL; | |
568 | } | |
569 | atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); | |
570 | } else if (direction == DMA_MEM_TO_DEV) { | |
571 | atchan->cfg = | |
572 | AT91_XDMAC_DT_PERID(atchan->perid) | |
573 | | AT_XDMAC_CC_DAM_FIXED_AM | |
574 | | AT_XDMAC_CC_SAM_INCREMENTED_AM | |
575 | | AT_XDMAC_CC_DIF(atchan->perif) | |
576 | | AT_XDMAC_CC_SIF(atchan->memif) | |
577 | | AT_XDMAC_CC_SWREQ_HWR_CONNECTED | |
578 | | AT_XDMAC_CC_DSYNC_MEM2PER | |
579 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | |
580 | | AT_XDMAC_CC_TYPE_PER_TRAN; | |
581 | csize = ffs(atchan->sconfig.dst_maxburst) - 1; | |
582 | if (csize < 0) { | |
583 | dev_err(chan2dev(chan), "invalid dst maxburst value\n"); |
584 | return -EINVAL; | |
585 | } | |
586 | atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); | |
587 | dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; | |
588 | if (dwidth < 0) { | |
589 | dev_err(chan2dev(chan), "invalid dst addr width value\n"); | |
590 | return -EINVAL; | |
591 | } | |
592 | atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); | |
593 | } | |
594 | ||
595 | dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); | |
596 | ||
597 | return 0; | |
598 | } | |
599 | ||
600 | /* | |
601 | * Only check that maxburst and addr width values are supported by the |
602 | * controller, but not that the configuration is good to perform the |
603 | * transfer since we don't know the direction at this stage. | |
604 | */ | |
605 | static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig) | |
606 | { | |
607 | if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) | |
608 | || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) | |
609 | return -EINVAL; | |
610 | ||
611 | if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) | |
612 | || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) | |
613 | return -EINVAL; | |
614 | ||
615 | return 0; | |
616 | } | |
617 | ||
618 | static int at_xdmac_set_slave_config(struct dma_chan *chan, |
619 | struct dma_slave_config *sconfig) | |
620 | { | |
621 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
622 |
623 | if (at_xdmac_check_slave_config(sconfig)) { |
624 | dev_err(chan2dev(chan), "invalid slave configuration\n"); |
625 | return -EINVAL; |
626 | } | |
627 |
628 | memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); |
629 | |
630 | return 0; | |
631 | } | |
632 | ||
633 | static struct dma_async_tx_descriptor * | |
634 | at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
635 | unsigned int sg_len, enum dma_transfer_direction direction, | |
636 | unsigned long flags, void *context) | |
637 | { | |
638 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
639 | struct at_xdmac_desc *first = NULL, *prev = NULL; | |
640 | struct scatterlist *sg; | |
641 | int i; | |
642 | unsigned int xfer_size = 0; | |
643 | unsigned long irqflags; | |
644 | struct dma_async_tx_descriptor *ret = NULL; |
645 | |
646 | if (!sgl) | |
647 | return NULL; | |
648 | ||
649 | if (!is_slave_direction(direction)) { | |
650 | dev_err(chan2dev(chan), "invalid DMA direction\n"); | |
651 | return NULL; | |
652 | } | |
653 | ||
654 | dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n", | |
655 | __func__, sg_len, | |
656 | direction == DMA_MEM_TO_DEV ? "to device" : "from device", | |
657 | flags); | |
658 | ||
659 | /* Protect dma_sconfig field that can be modified by set_slave_conf. */ | |
660 | spin_lock_irqsave(&atchan->lock, irqflags); |
661 |
662 | if (at_xdmac_compute_chan_conf(chan, direction)) |
663 | goto spin_unlock; | |
664 | ||
665 | /* Prepare descriptors. */ |
666 | for_each_sg(sgl, sg, sg_len, i) { | |
667 | struct at_xdmac_desc *desc = NULL; | |
668 | u32 len, mem, dwidth, fixed_dwidth; |
669 | |
670 | len = sg_dma_len(sg); | |
671 | mem = sg_dma_address(sg); | |
672 | if (unlikely(!len)) { | |
673 | dev_err(chan2dev(chan), "sg data length is zero\n"); | |
674 | goto spin_unlock; |
675 | } |
676 | dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", | |
677 | __func__, i, len, mem); | |
678 | ||
679 | desc = at_xdmac_get_desc(atchan); | |
680 | if (!desc) { | |
681 | dev_err(chan2dev(chan), "can't get descriptor\n"); | |
682 | if (first) | |
683 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | |
684 | goto spin_unlock; |
685 | } |
686 | ||
687 | /* Linked list descriptor setup. */ | |
688 | if (direction == DMA_DEV_TO_MEM) { | |
689 | desc->lld.mbr_sa = atchan->sconfig.src_addr; |
690 | desc->lld.mbr_da = mem; |
691 | } else { |
692 | desc->lld.mbr_sa = mem; |
693 | desc->lld.mbr_da = atchan->sconfig.dst_addr; |
694 | } |
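/*
 * If the element length is not a multiple of the configured data width,
 * fall back to byte-wide transfers so that the microblock length
 * (expressed in data units) describes the buffer exactly.
 */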
695 | dwidth = at_xdmac_get_dwidth(atchan->cfg); |
696 | fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) |
697 | ? dwidth |
698 | : AT_XDMAC_CC_DWIDTH_BYTE; |
699 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ |
700 | | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ |
701 | | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ |
702 | | (len >> fixed_dwidth); /* microblock length */ |
703 | desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | |
704 | AT_XDMAC_CC_DWIDTH(fixed_dwidth); | |
705 | dev_dbg(chan2dev(chan), |
706 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", |
707 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); |
708 | |
709 | /* Chain lld. */ | |
710 | if (prev) |
711 | at_xdmac_queue_desc(chan, prev, desc); |
712 | |
713 | prev = desc; | |
714 | if (!first) | |
715 | first = desc; | |
716 | ||
717 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | |
718 | __func__, desc, first); | |
719 | list_add_tail(&desc->desc_node, &first->descs_list); | |
720 | xfer_size += len; |
721 | } |
722 | ||
723 | |
724 | first->tx_dma_desc.flags = flags; | |
725 | first->xfer_size = xfer_size; |
726 | first->direction = direction; |
727 | ret = &first->tx_dma_desc; |
728 |
729 | spin_unlock: |
730 | spin_unlock_irqrestore(&atchan->lock, irqflags); | |
731 | return ret; | |
732 | } |
733 | ||
734 | static struct dma_async_tx_descriptor * | |
735 | at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, | |
736 | size_t buf_len, size_t period_len, | |
737 | enum dma_transfer_direction direction, | |
738 | unsigned long flags) | |
739 | { | |
740 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
741 | struct at_xdmac_desc *first = NULL, *prev = NULL; | |
742 | unsigned int periods = buf_len / period_len; | |
743 | int i; | |
744 | unsigned long irqflags; |
745 |
746 | dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", |
747 | __func__, &buf_addr, buf_len, period_len, | |
748 | direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags); |
749 | ||
750 | if (!is_slave_direction(direction)) { | |
751 | dev_err(chan2dev(chan), "invalid DMA direction\n"); | |
752 | return NULL; | |
753 | } | |
754 | ||
755 | if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) { | |
756 | dev_err(chan2dev(chan), "channel currently used\n"); | |
757 | return NULL; | |
758 | } | |
759 | ||
760 | if (at_xdmac_compute_chan_conf(chan, direction)) |
761 | return NULL; | |
762 | ||
763 | for (i = 0; i < periods; i++) { |
764 | struct at_xdmac_desc *desc = NULL; | |
765 | ||
766 | spin_lock_irqsave(&atchan->lock, irqflags); |
767 | desc = at_xdmac_get_desc(atchan); |
768 | if (!desc) { | |
769 | dev_err(chan2dev(chan), "can't get descriptor\n"); | |
770 | if (first) | |
771 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | |
772 | spin_unlock_irqrestore(&atchan->lock, irqflags); |
773 | return NULL; |
774 | } | |
775 | spin_unlock_irqrestore(&atchan->lock, irqflags); |
776 | dev_dbg(chan2dev(chan), |
777 | "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", |
778 | __func__, desc, &desc->tx_dma_desc.phys); | |
779 | |
780 | if (direction == DMA_DEV_TO_MEM) { | |
781 | desc->lld.mbr_sa = atchan->sconfig.src_addr; |
782 | desc->lld.mbr_da = buf_addr + i * period_len; |
783 | } else { |
784 | desc->lld.mbr_sa = buf_addr + i * period_len; | |
785 | desc->lld.mbr_da = atchan->sconfig.dst_addr; |
786 | } |
787 | desc->lld.mbr_cfg = atchan->cfg; |
788 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 |
789 | | AT_XDMAC_MBR_UBC_NDEN | |
790 | | AT_XDMAC_MBR_UBC_NSEN | |
791 | | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); |
792 | |
793 | dev_dbg(chan2dev(chan), | |
794 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", |
795 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); | |
796 | |
797 | /* Chain lld. */ | |
798 | if (prev) |
799 | at_xdmac_queue_desc(chan, prev, desc); | |
800 | |
801 | prev = desc; | |
802 | if (!first) | |
803 | first = desc; | |
804 | ||
805 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | |
806 | __func__, desc, first); | |
807 | list_add_tail(&desc->desc_node, &first->descs_list); | |
808 | } | |
809 | ||
810 | at_xdmac_queue_desc(chan, prev, first); |
811 | first->tx_dma_desc.flags = flags; |
812 | first->xfer_size = buf_len; | |
813 | first->direction = direction; | |
814 | ||
815 | return &first->tx_dma_desc; | |
816 | } | |
817 | ||
818 | static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr) |
819 | { | |
820 | u32 width; | |
821 | ||
822 | /* | |
823 | * Check address alignment to select the greatest data width we |
824 | * can use. | |
825 | * | |
826 | * Some XDMAC implementations don't provide dword transfer, in | |
827 | * this case selecting dword has the same behavior as | |
828 | * selecting word transfers. | |
829 | */ | |
830 | if (!(addr & 7)) { | |
831 | width = AT_XDMAC_CC_DWIDTH_DWORD; | |
832 | dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__); | |
833 | } else if (!(addr & 3)) { | |
834 | width = AT_XDMAC_CC_DWIDTH_WORD; | |
835 | dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__); | |
836 | } else if (!(addr & 1)) { | |
837 | width = AT_XDMAC_CC_DWIDTH_HALFWORD; | |
838 | dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__); | |
839 | } else { | |
840 | width = AT_XDMAC_CC_DWIDTH_BYTE; | |
841 | dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__); | |
842 | } | |
843 | ||
844 | return width; | |
845 | } | |
846 | ||
847 | static struct at_xdmac_desc * |
848 | at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |
849 | struct at_xdmac_chan *atchan, | |
850 | struct at_xdmac_desc *prev, | |
851 | dma_addr_t src, dma_addr_t dst, | |
852 | struct dma_interleaved_template *xt, | |
853 | struct data_chunk *chunk) | |
854 | { | |
855 | struct at_xdmac_desc *desc; | |
856 | u32 dwidth; | |
857 | unsigned long flags; | |
858 | size_t ublen; | |
859 | /* | |
860 | * WARNING: The channel configuration is set here since there is no | |
861 | * dmaengine_slave_config call in this case. Moreover we don't know the | |
862 | * direction, which means we can't dynamically set the source and dest |
863 | * interface so we have to use the same one. Only interface 0 allows EBI | |
864 | * access. Hopefully we can access DDR through both ports (at least on | |
865 | * SAMA5D4x), so we can use the same interface for source and dest, | |
866 | * that solves the fact we don't know the direction. | |
867 | * ERRATA: Even if useless for memory transfers, the PERID must not |
868 | * match that of another channel. Otherwise, it could lead to spurious |
869 | * flag status. |
870 | */ |
871 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
872 | | AT_XDMAC_CC_DIF(0) | |
873 | | AT_XDMAC_CC_SIF(0) |
874 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | |
875 | | AT_XDMAC_CC_TYPE_MEM_TRAN; | |
876 | ||
877 | dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); | |
878 | if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { | |
879 | dev_dbg(chan2dev(chan), | |
880 | "%s: chunk too big (%d, max size %lu)...\n", | |
881 | __func__, chunk->size, | |
882 | AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth); | |
883 | return NULL; | |
884 | } | |
885 | ||
886 | if (prev) | |
887 | dev_dbg(chan2dev(chan), | |
888 | "Adding items at the end of desc 0x%p\n", prev); | |
889 | ||
890 | if (xt->src_inc) { | |
891 | if (xt->src_sgl) | |
892 | chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; |
893 | else |
894 | chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; | |
895 | } | |
896 | ||
897 | if (xt->dst_inc) { | |
898 | if (xt->dst_sgl) | |
899 | chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; |
900 | else |
901 | chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; | |
902 | } | |
903 | ||
904 | spin_lock_irqsave(&atchan->lock, flags); | |
905 | desc = at_xdmac_get_desc(atchan); | |
906 | spin_unlock_irqrestore(&atchan->lock, flags); | |
907 | if (!desc) { | |
908 | dev_err(chan2dev(chan), "can't get descriptor\n"); | |
909 | return NULL; | |
910 | } | |
911 | ||
912 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); | |
913 | ||
914 | ublen = chunk->size >> dwidth; | |
915 | ||
916 | desc->lld.mbr_sa = src; | |
917 | desc->lld.mbr_da = dst; | |
918 | desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); |
919 | desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); | |
920 | |
921 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 | |
922 | | AT_XDMAC_MBR_UBC_NDEN | |
923 | | AT_XDMAC_MBR_UBC_NSEN | |
924 | | ublen; | |
925 | desc->lld.mbr_cfg = chan_cc; | |
926 | ||
927 | dev_dbg(chan2dev(chan), | |
928 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
929 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, | |
930 | desc->lld.mbr_ubc, desc->lld.mbr_cfg); |
931 | ||
932 | /* Chain lld. */ | |
933 | if (prev) | |
934 | at_xdmac_queue_desc(chan, prev, desc); | |
935 | ||
936 | return desc; | |
937 | } | |
938 | ||
939 | static struct dma_async_tx_descriptor * |
940 | at_xdmac_prep_interleaved(struct dma_chan *chan, | |
941 | struct dma_interleaved_template *xt, | |
942 | unsigned long flags) | |
943 | { | |
944 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
945 | struct at_xdmac_desc *prev = NULL, *first = NULL; | |
946 | dma_addr_t dst_addr, src_addr; |
947 | size_t src_skip = 0, dst_skip = 0, len = 0; |
948 | struct data_chunk *chunk; | |
949 | int i; |
950 | ||
951 | if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) |
952 | return NULL; | |
953 | ||
954 | /* | |
955 | * TODO: Handle the case where we have to repeat a chain of | |
956 | * descriptors... | |
957 | */ | |
958 | if ((xt->numf > 1) && (xt->frame_size > 1)) | |
959 | return NULL; |
960 | ||
961 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
962 | __func__, &xt->src_start, &xt->dst_start, xt->numf, | |
963 | xt->frame_size, flags); |
964 | ||
965 | src_addr = xt->src_start; | |
966 | dst_addr = xt->dst_start; | |
967 | ||
968 | if (xt->numf > 1) { |
969 | first = at_xdmac_interleaved_queue_desc(chan, atchan, | |
970 | NULL, | |
971 | src_addr, dst_addr, | |
972 | xt, xt->sgl); | |
973 | |
974 | /* Length of the block is (BLEN+1) microblocks. */ | |
975 | for (i = 0; i < xt->numf - 1; i++) | |
976 | at_xdmac_increment_block_count(chan, first); |
977 | |
978 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | |
979 | __func__, first, first); | |
980 | list_add_tail(&first->desc_node, &first->descs_list); | |
981 | } else { |
982 | for (i = 0; i < xt->frame_size; i++) { | |
983 | size_t src_icg = 0, dst_icg = 0; | |
984 | struct at_xdmac_desc *desc; | |
985 |
986 | chunk = xt->sgl + i; |
987 |
988 | dst_icg = dmaengine_get_dst_icg(xt, chunk); |
989 | src_icg = dmaengine_get_src_icg(xt, chunk); | |
990 |
991 | src_skip = chunk->size + src_icg; |
992 | dst_skip = chunk->size + dst_icg; | |
993 |
994 | dev_dbg(chan2dev(chan), |
995 | "%s: chunk size=%d, src icg=%d, dst icg=%d\n", |
996 | __func__, chunk->size, src_icg, dst_icg); | |
997 | ||
998 | desc = at_xdmac_interleaved_queue_desc(chan, atchan, | |
999 | prev, | |
1000 | src_addr, dst_addr, | |
1001 | xt, chunk); | |
1002 | if (!desc) { | |
1003 | list_splice_init(&first->descs_list, | |
1004 | &atchan->free_descs_list); | |
1005 | return NULL; | |
1006 | } | |
1007 | ||
1008 | if (!first) | |
1009 | first = desc; | |
1010 | ||
1011 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | |
1012 | __func__, desc, first); | |
1013 | list_add_tail(&desc->desc_node, &first->descs_list); | |
1014 | ||
1015 | if (xt->src_sgl) | |
1016 | src_addr += src_skip; | |
1017 | ||
1018 | if (xt->dst_sgl) | |
1019 | dst_addr += dst_skip; | |
1020 | ||
1021 | len += chunk->size; | |
1022 | prev = desc; | |
1023 | } |
1024 | } |
1025 | ||
1026 | first->tx_dma_desc.cookie = -EBUSY; | |
1027 | first->tx_dma_desc.flags = flags; | |
1028 | first->xfer_size = len; | |
1029 | ||
1030 | return &first->tx_dma_desc; | |
1031 | } | |
1032 | ||
1033 | static struct dma_async_tx_descriptor * |
1034 | at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |
1035 | size_t len, unsigned long flags) | |
1036 | { | |
1037 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1038 | struct at_xdmac_desc *first = NULL, *prev = NULL; | |
1039 | size_t remaining_size = len, xfer_size = 0, ublen; | |
1040 | dma_addr_t src_addr = src, dst_addr = dest; | |
1041 | u32 dwidth; | |
1042 | /* | |
1043 | * WARNING: We don't know the direction, it involves we can't | |
1044 | * dynamically set the source and dest interface so we have to use the | |
1045 | * same one. Only interface 0 allows EBI access. Hopefully we can | |
1046 | * access DDR through both ports (at least on SAMA5D4x), so we can use | |
1047 | * the same interface for source and dest, that solves the fact we | |
1048 | * don't know the direction. | |
1049 | * ERRATA: Even if useless for memory transfers, the PERID must not |
1050 | * match that of another channel. Otherwise, it could lead to spurious |
1051 | * flag status. |
1052 | */ |
1053 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
1054 | | AT_XDMAC_CC_DAM_INCREMENTED_AM | |
1055 | | AT_XDMAC_CC_SAM_INCREMENTED_AM |
1056 | | AT_XDMAC_CC_DIF(0) | |
1057 | | AT_XDMAC_CC_SIF(0) | |
1058 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | |
1059 | | AT_XDMAC_CC_TYPE_MEM_TRAN; | |
1060 | unsigned long irqflags; |
1061 |
1062 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", |
1063 | __func__, &src, &dest, len, flags); | |
1064 | |
1065 | if (unlikely(!len)) | |
1066 | return NULL; | |
1067 | ||
1068 | dwidth = at_xdmac_align_width(chan, src_addr | dst_addr); |
1069 | |
1070 | /* Prepare descriptors. */ | |
1071 | while (remaining_size) { | |
1072 | struct at_xdmac_desc *desc = NULL; | |
1073 | ||
1074 | dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); |
1075 |
1076 | spin_lock_irqsave(&atchan->lock, irqflags); |
1077 | desc = at_xdmac_get_desc(atchan); |
1078 | spin_unlock_irqrestore(&atchan->lock, irqflags); |
1079 | if (!desc) { |
1080 | dev_err(chan2dev(chan), "can't get descriptor\n"); | |
1081 | if (first) | |
1082 | list_splice_init(&first->descs_list, &atchan->free_descs_list); | |
1083 | return NULL; | |
1084 | } | |
1085 | ||
1086 | /* Update src and dest addresses. */ | |
1087 | src_addr += xfer_size; | |
1088 | dst_addr += xfer_size; | |
1089 | ||
1090 | if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth) | |
1091 | xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth; | |
1092 | else | |
1093 | xfer_size = remaining_size; | |
1094 | ||
1095 | dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size); |
1096 | |
1097 | /* Check remaining length and change data width if needed. */ | |
1098 | dwidth = at_xdmac_align_width(chan, |
1099 | src_addr | dst_addr | xfer_size); | |
1100 | chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK; |
1101 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); |
1102 | ||
1103 | ublen = xfer_size >> dwidth; | |
1104 | remaining_size -= xfer_size; | |
1105 | ||
1106 | desc->lld.mbr_sa = src_addr; | |
1107 | desc->lld.mbr_da = dst_addr; | |
1108 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 | |
1109 | | AT_XDMAC_MBR_UBC_NDEN | |
1110 | | AT_XDMAC_MBR_UBC_NSEN | |
1111 | | ublen; |
1112 | desc->lld.mbr_cfg = chan_cc; | |
1113 | ||
1114 | dev_dbg(chan2dev(chan), | |
1115 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
1116 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); | |
1117 | |
1118 | /* Chain lld. */ | |
1119 | if (prev) |
1120 | at_xdmac_queue_desc(chan, prev, desc); | |
1121 | |
1122 | prev = desc; | |
1123 | if (!first) | |
1124 | first = desc; | |
1125 | ||
1126 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | |
1127 | __func__, desc, first); | |
1128 | list_add_tail(&desc->desc_node, &first->descs_list); | |
1129 | } | |
1130 | ||
1131 | first->tx_dma_desc.flags = flags; | |
1132 | first->xfer_size = len; | |
1133 | ||
1134 | return &first->tx_dma_desc; | |
1135 | } | |
1136 | ||
1137 | static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, |
1138 | struct at_xdmac_chan *atchan, | |
1139 | dma_addr_t dst_addr, | |
1140 | size_t len, | |
1141 | int value) | |
1142 | { | |
1143 | struct at_xdmac_desc *desc; | |
1144 | unsigned long flags; | |
1145 | size_t ublen; | |
1146 | u32 dwidth; | |
1147 | /* | |
1148 | * WARNING: The channel configuration is set here since there is no | |
1149 | * dmaengine_slave_config call in this case. Moreover we don't know the | |
1151 | * direction, which means we can't dynamically set the source and dest |
1151 | * interface so we have to use the same one. Only interface 0 allows EBI | |
1152 | * access. Hopefully we can access DDR through both ports (at least on | |
1153 | * SAMA5D4x), so we can use the same interface for source and dest, | |
1154 | * that solves the fact we don't know the direction. | |
1155 | * ERRATA: Even if useless for memory transfers, the PERID must not |
1156 | * match that of another channel. Otherwise, it could lead to spurious |
1157 | * flag status. |
1158 | */ |
1159 | u32 chan_cc = AT_XDMAC_CC_PERID(0x3f) |
1160 | | AT_XDMAC_CC_DAM_UBS_AM | |
1161 | | AT_XDMAC_CC_SAM_INCREMENTED_AM |
1162 | | AT_XDMAC_CC_DIF(0) | |
1163 | | AT_XDMAC_CC_SIF(0) | |
1164 | | AT_XDMAC_CC_MBSIZE_SIXTEEN | |
1165 | | AT_XDMAC_CC_MEMSET_HW_MODE | |
1166 | | AT_XDMAC_CC_TYPE_MEM_TRAN; | |
1167 | ||
1168 | dwidth = at_xdmac_align_width(chan, dst_addr); | |
1169 | ||
1170 | if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { | |
1171 | dev_err(chan2dev(chan), | |
1172 | "%s: Transfer too large, aborting...\n", | |
1173 | __func__); | |
1174 | return NULL; | |
1175 | } | |
1176 | ||
1177 | spin_lock_irqsave(&atchan->lock, flags); | |
1178 | desc = at_xdmac_get_desc(atchan); | |
1179 | spin_unlock_irqrestore(&atchan->lock, flags); | |
1180 | if (!desc) { | |
1181 | dev_err(chan2dev(chan), "can't get descriptor\n"); | |
1182 | return NULL; | |
1183 | } | |
1184 | ||
1185 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); | |
1186 | ||
1187 | ublen = len >> dwidth; | |
1188 | ||
1189 | desc->lld.mbr_da = dst_addr; | |
1190 | desc->lld.mbr_ds = value; | |
1191 | desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 | |
1192 | | AT_XDMAC_MBR_UBC_NDEN | |
1193 | | AT_XDMAC_MBR_UBC_NSEN | |
1194 | | ublen; | |
1195 | desc->lld.mbr_cfg = chan_cc; | |
1196 | ||
1197 | dev_dbg(chan2dev(chan), | |
1198 | "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
1199 | __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, | |
1200 | desc->lld.mbr_cfg); |
1201 | ||
1202 | return desc; | |
1203 | } | |
1204 | ||
1205 | static struct dma_async_tx_descriptor * |
1206 | at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, |
1207 | size_t len, unsigned long flags) | |
1208 | { | |
1209 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1210 | struct at_xdmac_desc *desc; | |
1211 | ||
1212 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
1213 | __func__, &dest, len, value, flags); | |
1214 | |
1215 | if (unlikely(!len)) | |
1216 | return NULL; | |
1217 | ||
1218 | desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value); | |
1219 | list_add_tail(&desc->desc_node, &desc->descs_list); | |
1220 | ||
1221 | desc->tx_dma_desc.cookie = -EBUSY; | |
1222 | desc->tx_dma_desc.flags = flags; | |
1223 | desc->xfer_size = len; | |
1224 | ||
1225 | return &desc->tx_dma_desc; | |
1226 | } | |
1227 | ||
1228 | static struct dma_async_tx_descriptor * |
1229 | at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
1230 | unsigned int sg_len, int value, | |
1231 | unsigned long flags) | |
1232 | { | |
1233 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1234 | struct at_xdmac_desc *desc, *pdesc = NULL, | |
1235 | *ppdesc = NULL, *first = NULL; | |
1236 | struct scatterlist *sg, *psg = NULL, *ppsg = NULL; | |
1237 | size_t stride = 0, pstride = 0, len = 0; | |
1238 | int i; | |
1239 | ||
1240 | if (!sgl) | |
1241 | return NULL; | |
1242 | ||
1243 | dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n", | |
1244 | __func__, sg_len, value, flags); | |
1245 | ||
1246 | /* Prepare descriptors. */ | |
1247 | for_each_sg(sgl, sg, sg_len, i) { | |
1248 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
1249 | __func__, &sg_dma_address(sg), sg_dma_len(sg), | |
1250 | value, flags); |
1251 | desc = at_xdmac_memset_create_desc(chan, atchan, | |
1252 | sg_dma_address(sg), | |
1253 | sg_dma_len(sg), | |
1254 | value); | |
1255 | if (!desc && first) | |
1256 | list_splice_init(&first->descs_list, | |
1257 | &atchan->free_descs_list); | |
1258 | ||
1259 | if (!first) | |
1260 | first = desc; | |
1261 | ||
1262 | /* Update our strides */ | |
1263 | pstride = stride; | |
1264 | if (psg) | |
1265 | stride = sg_dma_address(sg) - | |
1266 | (sg_dma_address(psg) + sg_dma_len(psg)); | |
1267 | ||
1268 | /* | |
1269 | * The scatterlist API gives us only the address and | |
1270 | * length of each element. |
1271 | * | |
1272 | * Unfortunately, we don't have the stride, which we | |
1273 | * will need to compute. | |
1274 | * | |
1275 | * That makes us end up in a situation like this one: |
1276 | * len stride len stride len | |
1277 | * +-------+ +-------+ +-------+ | |
1278 | * | N-2 | | N-1 | | N | | |
1279 | * +-------+ +-------+ +-------+ | |
1280 | * | |
1281 | * We need all these three elements (N-2, N-1 and N) | |
1282 | * to actually take the decision on whether we need to | |
1283 | * queue N-1 or reuse N-2. | |
1284 | * | |
1285 | * We will only consider N if it is the last element. | |
1286 | */ | |
1287 | if (ppdesc && pdesc) { | |
1288 | if ((stride == pstride) && | |
1289 | (sg_dma_len(ppsg) == sg_dma_len(psg))) { | |
1290 | dev_dbg(chan2dev(chan), | |
1291 | "%s: desc 0x%p can be merged with desc 0x%p\n", | |
1292 | __func__, pdesc, ppdesc); | |
1293 | ||
1294 | /* | |
1295 | * Increment the block count of the | |
1296 | * N-2 descriptor | |
1297 | */ | |
1298 | at_xdmac_increment_block_count(chan, ppdesc); | |
1299 | ppdesc->lld.mbr_dus = stride; | |
1300 | ||
1301 | /* | |
1302 | * Put back the N-1 descriptor in the | |
1303 | * free descriptor list | |
1304 | */ | |
1305 | list_add_tail(&pdesc->desc_node, | |
1306 | &atchan->free_descs_list); | |
1307 | ||
1308 | /* | |
1309 | * Make our N-1 descriptor pointer | |
1310 | * point to the N-2 since they were | |
1311 | * actually merged. | |
1312 | */ | |
1313 | pdesc = ppdesc; | |
1314 | ||
1315 | /* | |
1316 | * Rule out the case where we don't have | |
1317 | * pstride computed yet (our second sg | |
1318 | * element) | |
1319 | * | |
1320 | * We also want to catch the case where there | |
1321 | * would be a negative stride, | |
1322 | */ | |
1323 | } else if (pstride || | |
1324 | sg_dma_address(sg) < sg_dma_address(psg)) { | |
1325 | /* | |
1326 | * Queue the N-1 descriptor after the | |
1327 | * N-2 | |
1328 | */ | |
1329 | at_xdmac_queue_desc(chan, ppdesc, pdesc); | |
1330 | ||
1331 | /* | |
1332 | * Add the N-1 descriptor to the list | |
1333 | * of the descriptors used for this | |
1334 | * transfer | |
1335 | */ | |
1336 | list_add_tail(&desc->desc_node, | |
1337 | &first->descs_list); | |
1338 | dev_dbg(chan2dev(chan), | |
1339 | "%s: add desc 0x%p to descs_list 0x%p\n", | |
1340 | __func__, desc, first); | |
1341 | } | |
1342 | } | |
1343 | ||
1344 | /* | |
1345 | * If we are the last element, just see if we have the | |
1346 | * same size as the previous element. | |
1347 | * | |
1348 | * If so, we can merge it with the previous descriptor | |
1349 | * since we don't care about the stride anymore. | |
1350 | */ | |
1351 | if ((i == (sg_len - 1)) && | |
f5a00eb7 | 1352 | sg_dma_len(psg) == sg_dma_len(sg)) { |
67a6eedc MR |
1353 | dev_dbg(chan2dev(chan), |
1354 | "%s: desc 0x%p can be merged with desc 0x%p\n", | |
1355 | __func__, desc, pdesc); | |
1356 | ||
1357 | /* | |
1358 | * Increment the block count of the N-1 | |
1359 | * descriptor | |
1360 | */ | |
1361 | at_xdmac_increment_block_count(chan, pdesc); | |
1362 | pdesc->lld.mbr_dus = stride; | |
1363 | ||
1364 | /* | |
1365 | * Put back the N descriptor in the free | |
1366 | * descriptor list | |
1367 | */ | |
1368 | list_add_tail(&desc->desc_node, | |
1369 | &atchan->free_descs_list); | |
1370 | } | |
1371 | ||
1372 | /* Update our descriptors */ | |
1373 | ppdesc = pdesc; | |
1374 | pdesc = desc; | |
1375 | ||
1376 | /* Update our scatter pointers */ | |
1377 | ppsg = psg; | |
1378 | psg = sg; | |
1379 | ||
1380 | len += sg_dma_len(sg); | |
1381 | } | |
1382 | ||
1383 | first->tx_dma_desc.cookie = -EBUSY; | |
1384 | first->tx_dma_desc.flags = flags; | |
1385 | first->xfer_size = len; | |
1386 | ||
1387 | return &first->tx_dma_desc; | |
1388 | } | |
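
The comment block inside at_xdmac_prep_dma_memset_sg() describes the merge heuristic in terms of an N-2/N-1/N window. As an aside, the following is a minimal, self-contained sketch of that same decision in plain C; struct seg, stride() and can_merge() are hypothetical stand-ins for scatterlist entries and are not part of this driver.

/*
 * Illustrative sketch only: struct seg and the helpers below are
 * hypothetical stand-ins for scatterlist entries, used to show the
 * stride/length test applied by at_xdmac_prep_dma_memset_sg().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct seg {
	size_t addr;	/* stands in for sg_dma_address() */
	size_t len;	/* stands in for sg_dma_len() */
};

/* Gap between the end of segment a and the start of segment b. */
static size_t stride(const struct seg *a, const struct seg *b)
{
	return b->addr - (a->addr + a->len);
}

/*
 * Segment N-1 can be folded into the descriptor already built for N-2
 * when both have the same length and the stride following each of them
 * is identical, i.e. the pattern repeats with a constant period.
 */
static bool can_merge(const struct seg *n2, const struct seg *n1,
		      const struct seg *n)
{
	return n1->len == n2->len && stride(n1, n) == stride(n2, n1);
}

int main(void)
{
	struct seg s[] = { { 0x1000, 64 }, { 0x1080, 64 }, { 0x1100, 64 } };

	/* Equal lengths, constant 64-byte stride: prints "mergeable: 1". */
	printf("mergeable: %d\n", can_merge(&s[0], &s[1], &s[2]));
	return 0;
}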
1389 | ||
e1f7c9ee LD |
1390 | static enum dma_status |
1391 | at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |
1392 | struct dma_tx_state *txstate) | |
1393 | { | |
1394 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1395 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | |
1396 | struct at_xdmac_desc *desc, *_desc; | |
1397 | struct list_head *descs_list; | |
1398 | enum dma_status ret; | |
25c5e962 LD |
1399 | int residue, retry; |
1400 | u32 cur_nda, check_nda, cur_ubc, mask, value; | |
be835074 | 1401 | u8 dwidth = 0; |
4c374fc7 | 1402 | unsigned long flags; |
53398f48 | 1403 | bool initd; |
e1f7c9ee LD |
1404 | |
1405 | ret = dma_cookie_status(chan, cookie, txstate); | |
1406 | if (ret == DMA_COMPLETE) | |
1407 | return ret; | |
1408 | ||
1409 | if (!txstate) | |
1410 | return ret; | |
1411 | ||
4c374fc7 | 1412 | spin_lock_irqsave(&atchan->lock, flags); |
e1f7c9ee LD |
1413 | |
1414 | desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); | |
1415 | ||
1416 | /* | |
1417 | * If the transfer has not been started yet, there is no need to compute | |
1418 | * the residue: it is simply the transfer length. | |
1419 | */ | |
1420 | if (!desc->active_xfer) { | |
1421 | dma_set_residue(txstate, desc->xfer_size); | |
4c374fc7 | 1422 | goto spin_unlock; |
e1f7c9ee LD |
1423 | } |
1424 | ||
1425 | residue = desc->xfer_size; | |
4e097820 CP |
1426 | /* |
1427 | * Flush FIFO: only relevant when the transfer is source peripheral | |
9295c41d LD |
1428 | * synchronized. Flush is needed before reading CUBC because data in |
1429 | * the FIFO are not reported by CUBC. Reporting a residue of the | |
1430 | * transfer length while we have data in the FIFO can cause issues. | |
1431 | * Use case: the Atmel USART has a timeout which means characters have | |
1432 | * been received but no new character arrives for a while. On timeout, | |
1433 | * it requests the residue. If the data are still in the DMA FIFO, we | |
1434 | * will return a residue equal to the transfer length, meaning no data | |
1435 | * were received. If an application is waiting for these data, it will hang | |
1436 | * since we won't have another USART timeout without receiving new | |
1437 | * data. | |
4e097820 CP |
1438 | */ |
1439 | mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; | |
1440 | value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; | |
be835074 | 1441 | if ((desc->lld.mbr_cfg & mask) == value) { |
4e097820 CP |
1442 | at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); |
1443 | while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) | |
1444 | cpu_relax(); | |
1445 | } | |
e1f7c9ee | 1446 | |
25c5e962 | 1447 | /* |
53398f48 LD |
1448 | * The easiest way to compute the residue would be to pause the DMA | |
1449 | * but doing so can lead to missed data, as some devices don't | |
1450 | * have a FIFO. | |
1451 | * We need to read several registers because: | |
1452 | * - DMA is running therefore a descriptor change is possible while | |
1453 | * reading these registers | |
1454 | * - When the block transfer is done, the value of the CUBC register | |
1455 | * is set to its initial value until the fetch of the next descriptor. | |
1456 | * This value will corrupt the residue calculation so we have to skip | |
1457 | * it. | |
1458 | * | |
1459 | * INITD -------- ------------ | |
1460 | * |____________________| | |
1461 | * _______________________ _______________ | |
1462 | * NDA @desc2 \/ @desc3 | |
1463 | * _______________________/\_______________ | |
1464 | * __________ ___________ _______________ | |
1465 | * CUBC 0 \/ MAX desc1 \/ MAX desc2 | |
1466 | * __________/\___________/\_______________ | |
1467 | * | |
1468 | * Since descriptors are aligned on 64 bits, we can assume that | |
1469 | * the update of NDA and CUBC is atomic. | |
25c5e962 | 1470 | * Memory barriers are used to ensure the read order of the registers. |
53398f48 | 1471 | * A maximum number of retries is set because, although unlikely, the loop could otherwise never end. |
25c5e962 | 1472 | */ |
25c5e962 | 1473 | for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { |
25c5e962 | 1474 | check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; |
53398f48 LD |
1475 | rmb(); |
1476 | initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); | |
25c5e962 LD |
1477 | rmb(); |
1478 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); | |
53398f48 LD |
1479 | rmb(); |
1480 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | |
1481 | rmb(); | |
1482 | ||
1483 | if ((check_nda == cur_nda) && initd) | |
1484 | break; | |
25c5e962 LD |
1485 | } |
1486 | ||
1487 | if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { | |
1488 | ret = DMA_ERROR; | |
1489 | goto spin_unlock; | |
1490 | } | |
1491 | ||
9295c41d LD |
1492 | /* |
1493 | * Flush FIFO: only relevant when the transfer is source peripheral | |
1494 | * synchronized. Another flush is needed here because CUBC is updated | |
1495 | * when the controller sends the data write command. It can lead to | |
1496 | * report data that are not written in the memory or the device. The | |
1497 | * FIFO flush ensures that data are really written. | |
1498 | */ | |
1499 | if ((desc->lld.mbr_cfg & mask) == value) { | |
1500 | at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); | |
1501 | while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) | |
1502 | cpu_relax(); | |
1503 | } | |
1504 | ||
e1f7c9ee LD |
1505 | /* |
1506 | * Subtract the size of every microblock up to and including the | |
1507 | * current one, then add back the size still to be transferred in | |
1508 | * the current microblock. | |
1509 | */ | |
1510 | descs_list = &desc->descs_list; | |
1511 | list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { | |
be835074 | 1512 | dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); |
e1f7c9ee LD |
1513 | residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; |
1514 | if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) | |
1515 | break; | |
1516 | } | |
25c5e962 | 1517 | residue += cur_ubc << dwidth; |
e1f7c9ee | 1518 | |
e1f7c9ee LD |
1519 | dma_set_residue(txstate, residue); |
1520 | ||
1521 | dev_dbg(chan2dev(chan), | |
82e24246 VK |
1522 | "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", |
1523 | __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); | |
e1f7c9ee | 1524 | |
4c374fc7 LD |
1525 | spin_unlock: |
1526 | spin_unlock_irqrestore(&atchan->lock, flags); | |
e1f7c9ee LD |
1527 | return ret; |
1528 | } | |
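
For context, the residue computed by at_xdmac_tx_status() reaches slave clients through the generic dmaengine API. Below is a rough client-side sketch, not part of this driver; chan, cookie and xfer_len are assumed to come from the client's own prep/submit path.

/*
 * Client-side sketch (not part of this driver): chan, cookie and
 * xfer_len are assumed to come from the client's own prep/submit path.
 * dmaengine_tx_status() ends up in at_xdmac_tx_status() above.
 */
#include <linux/dmaengine.h>

static size_t client_bytes_received(struct dma_chan *chan,
				    dma_cookie_t cookie, size_t xfer_len)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* The residue is what is left to transfer, so subtract it. */
	return xfer_len - state.residue;
}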
1529 | ||
1530 | /* Caller must hold the channel lock. */ | |
1531 | static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, | |
1532 | struct at_xdmac_desc *desc) | |
1533 | { | |
1534 | dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | |
1535 | ||
1536 | /* | |
1537 | * Remove the transfer from the transfer list then move the transfer | |
1538 | * descriptors into the free descriptors list. | |
1539 | */ | |
1540 | list_del(&desc->xfer_node); | |
1541 | list_splice_init(&desc->descs_list, &atchan->free_descs_list); | |
1542 | } | |
1543 | ||
1544 | static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) | |
1545 | { | |
1546 | struct at_xdmac_desc *desc; | |
4c374fc7 | 1547 | unsigned long flags; |
e1f7c9ee | 1548 | |
4c374fc7 | 1549 | spin_lock_irqsave(&atchan->lock, flags); |
e1f7c9ee LD |
1550 | |
1551 | /* | |
1552 | * If the channel is enabled, do nothing; advance_work will be triggered | |
1553 | * after the interrupt. | |
1554 | */ | |
1555 | if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) { | |
1556 | desc = list_first_entry(&atchan->xfers_list, | |
1557 | struct at_xdmac_desc, | |
1558 | xfer_node); | |
1559 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | |
1560 | if (!desc->active_xfer) | |
1561 | at_xdmac_start_xfer(atchan, desc); | |
1562 | } | |
1563 | ||
4c374fc7 | 1564 | spin_unlock_irqrestore(&atchan->lock, flags); |
e1f7c9ee LD |
1565 | } |
1566 | ||
1567 | static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) | |
1568 | { | |
1569 | struct at_xdmac_desc *desc; | |
1570 | struct dma_async_tx_descriptor *txd; | |
1571 | ||
1572 | desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); | |
1573 | txd = &desc->tx_dma_desc; | |
1574 | ||
a1d4eaaf DJ |
1575 | if (txd->flags & DMA_PREP_INTERRUPT) |
1576 | dmaengine_desc_get_callback_invoke(txd, NULL); | |
e1f7c9ee LD |
1577 | } |
1578 | ||
1579 | static void at_xdmac_tasklet(unsigned long data) | |
1580 | { | |
1581 | struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data; | |
1582 | struct at_xdmac_desc *desc; | |
1583 | u32 error_mask; | |
1584 | ||
1585 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", | |
1586 | __func__, atchan->status); | |
1587 | ||
1588 | error_mask = AT_XDMAC_CIS_RBEIS | |
1589 | | AT_XDMAC_CIS_WBEIS | |
1590 | | AT_XDMAC_CIS_ROIS; | |
1591 | ||
1592 | if (at_xdmac_chan_is_cyclic(atchan)) { | |
1593 | at_xdmac_handle_cyclic(atchan); | |
1594 | } else if ((atchan->status & AT_XDMAC_CIS_LIS) | |
1595 | || (atchan->status & error_mask)) { | |
1596 | struct dma_async_tx_descriptor *txd; | |
1597 | ||
1598 | if (atchan->status & AT_XDMAC_CIS_RBEIS) | |
1599 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); | |
1600 | if (atchan->status & AT_XDMAC_CIS_WBEIS) | |
1601 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); | |
1602 | if (atchan->status & AT_XDMAC_CIS_ROIS) | |
1603 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); | |
1604 | ||
1605 | spin_lock_bh(&atchan->lock); | |
1606 | desc = list_first_entry(&atchan->xfers_list, | |
1607 | struct at_xdmac_desc, | |
1608 | xfer_node); | |
1609 | dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); | |
1610 | BUG_ON(!desc->active_xfer); | |
1611 | ||
1612 | txd = &desc->tx_dma_desc; | |
1613 | ||
1614 | at_xdmac_remove_xfer(atchan, desc); | |
1615 | spin_unlock_bh(&atchan->lock); | |
1616 | ||
1617 | if (!at_xdmac_chan_is_cyclic(atchan)) { | |
1618 | dma_cookie_complete(txd); | |
a1d4eaaf DJ |
1619 | if (txd->flags & DMA_PREP_INTERRUPT) |
1620 | dmaengine_desc_get_callback_invoke(txd, NULL); | |
e1f7c9ee LD |
1621 | } |
1622 | ||
1623 | dma_run_dependencies(txd); | |
1624 | ||
1625 | at_xdmac_advance_work(atchan); | |
1626 | } | |
1627 | } | |
1628 | ||
1629 | static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | |
1630 | { | |
1631 | struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id; | |
1632 | struct at_xdmac_chan *atchan; | |
1633 | u32 imr, status, pending; | |
1634 | u32 chan_imr, chan_status; | |
1635 | int i, ret = IRQ_NONE; | |
1636 | ||
1637 | do { | |
1638 | imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM); | |
1639 | status = at_xdmac_read(atxdmac, AT_XDMAC_GIS); | |
1640 | pending = status & imr; | |
1641 | ||
1642 | dev_vdbg(atxdmac->dma.dev, | |
1643 | "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n", | |
1644 | __func__, status, imr, pending); | |
1645 | ||
1646 | if (!pending) | |
1647 | break; | |
1648 | ||
1649 | /* We have to find which channel has generated the interrupt. */ | |
1650 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | |
1651 | if (!((1 << i) & pending)) | |
1652 | continue; | |
1653 | ||
1654 | atchan = &atxdmac->chan[i]; | |
1655 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); | |
1656 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); | |
1657 | atchan->status = chan_status & chan_imr; | |
1658 | dev_vdbg(atxdmac->dma.dev, | |
1659 | "%s: chan%d: imr=0x%x, status=0x%x\n", | |
1660 | __func__, i, chan_imr, chan_status); | |
1661 | dev_vdbg(chan2dev(&atchan->chan), | |
1662 | "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n", | |
1663 | __func__, | |
1664 | at_xdmac_chan_read(atchan, AT_XDMAC_CC), | |
1665 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDA), | |
1666 | at_xdmac_chan_read(atchan, AT_XDMAC_CNDC), | |
1667 | at_xdmac_chan_read(atchan, AT_XDMAC_CSA), | |
1668 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | |
1669 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | |
1670 | ||
1671 | if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) | |
1672 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); | |
1673 | ||
1674 | tasklet_schedule(&atchan->tasklet); | |
1675 | ret = IRQ_HANDLED; | |
1676 | } | |
1677 | ||
1678 | } while (pending); | |
1679 | ||
1680 | return ret; | |
1681 | } | |
1682 | ||
1683 | static void at_xdmac_issue_pending(struct dma_chan *chan) | |
1684 | { | |
1685 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1686 | ||
1687 | dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); | |
1688 | ||
1689 | if (!at_xdmac_chan_is_cyclic(atchan)) | |
1690 | at_xdmac_advance_work(atchan); | |
1691 | ||
1692 | return; | |
1693 | } | |
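
issue_pending is the second half of the usual dmaengine submit sequence. A minimal client-side sketch of that sequence follows (not part of this driver; txd is assumed to be a descriptor returned by one of the device_prep_* callbacks above).

/*
 * Client-side sketch (not part of this driver): txd is assumed to be a
 * descriptor returned by one of the device_prep_* callbacks above.
 */
#include <linux/dmaengine.h>

static int client_submit_and_start(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Kicks at_xdmac_issue_pending(), which starts the transfer for
	 * non-cyclic channels via at_xdmac_advance_work(). */
	dma_async_issue_pending(chan);
	return 0;
}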
1694 | ||
3d138877 LD |
1695 | static int at_xdmac_device_config(struct dma_chan *chan, |
1696 | struct dma_slave_config *config) | |
1697 | { | |
1698 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1699 | int ret; | |
4c374fc7 | 1700 | unsigned long flags; |
3d138877 LD |
1701 | |
1702 | dev_dbg(chan2dev(chan), "%s\n", __func__); | |
1703 | ||
4c374fc7 | 1704 | spin_lock_irqsave(&atchan->lock, flags); |
3d138877 | 1705 | ret = at_xdmac_set_slave_config(chan, config); |
4c374fc7 | 1706 | spin_unlock_irqrestore(&atchan->lock, flags); |
3d138877 LD |
1707 | |
1708 | return ret; | |
1709 | } | |
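
device_config is reached through dmaengine_slave_config(). A minimal client-side sketch follows (not part of this driver); the peripheral register address and the one-byte bus width are placeholders for whatever the real device requires.

/*
 * Client-side sketch (not part of this driver): periph_rx_reg is a
 * placeholder for the peripheral's receive register address, and the
 * one-byte bus width is only an example.
 */
#include <linux/dmaengine.h>

static int client_configure_rx(struct dma_chan *chan, dma_addr_t periph_rx_reg)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= periph_rx_reg,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};

	/* Ends up in at_xdmac_device_config() via the dmaengine core. */
	return dmaengine_slave_config(chan, &cfg);
}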
1710 | ||
1711 | static int at_xdmac_device_pause(struct dma_chan *chan) | |
e1f7c9ee | 1712 | { |
e1f7c9ee LD |
1713 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
1714 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | |
4c374fc7 | 1715 | unsigned long flags; |
e1f7c9ee | 1716 | |
3d138877 | 1717 | dev_dbg(chan2dev(chan), "%s\n", __func__); |
e1f7c9ee | 1718 | |
cbb85e67 CP |
1719 | if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) |
1720 | return 0; | |
1721 | ||
4c374fc7 | 1722 | spin_lock_irqsave(&atchan->lock, flags); |
3d138877 | 1723 | at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); |
cbb85e67 CP |
1724 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) |
1725 | & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) | |
1726 | cpu_relax(); | |
4c374fc7 | 1727 | spin_unlock_irqrestore(&atchan->lock, flags); |
e1f7c9ee | 1728 | |
3d138877 LD |
1729 | return 0; |
1730 | } | |
e1f7c9ee | 1731 | |
3d138877 LD |
1732 | static int at_xdmac_device_resume(struct dma_chan *chan) |
1733 | { | |
1734 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1735 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | |
4c374fc7 | 1736 | unsigned long flags; |
e1f7c9ee | 1737 | |
3d138877 | 1738 | dev_dbg(chan2dev(chan), "%s\n", __func__); |
e1f7c9ee | 1739 | |
4c374fc7 | 1740 | spin_lock_irqsave(&atchan->lock, flags); |
0434a231 | 1741 | if (!at_xdmac_chan_is_paused(atchan)) { |
4c374fc7 | 1742 | spin_unlock_irqrestore(&atchan->lock, flags); |
3d138877 | 1743 | return 0; |
0434a231 | 1744 | } |
e1f7c9ee | 1745 | |
3d138877 LD |
1746 | at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); |
1747 | clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); | |
4c374fc7 | 1748 | spin_unlock_irqrestore(&atchan->lock, flags); |
3d138877 LD |
1749 | |
1750 | return 0; | |
1751 | } | |
e1f7c9ee | 1752 | |
3d138877 LD |
1753 | static int at_xdmac_device_terminate_all(struct dma_chan *chan) |
1754 | { | |
1755 | struct at_xdmac_desc *desc, *_desc; | |
1756 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1757 | struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); | |
4c374fc7 | 1758 | unsigned long flags; |
e1f7c9ee | 1759 | |
3d138877 | 1760 | dev_dbg(chan2dev(chan), "%s\n", __func__); |
e1f7c9ee | 1761 | |
4c374fc7 | 1762 | spin_lock_irqsave(&atchan->lock, flags); |
3d138877 LD |
1763 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); |
1764 | while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) | |
1765 | cpu_relax(); | |
e1f7c9ee | 1766 | |
3d138877 LD |
1767 | /* Cancel all pending transfers. */ |
1768 | list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) | |
1769 | at_xdmac_remove_xfer(atchan, desc); | |
e1f7c9ee | 1770 | |
611dcadb | 1771 | clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); |
3d138877 | 1772 | clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); |
4c374fc7 | 1773 | spin_unlock_irqrestore(&atchan->lock, flags); |
e1f7c9ee | 1774 | |
3d138877 | 1775 | return 0; |
e1f7c9ee LD |
1776 | } |
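
terminate_all and free_chan_resources are typically exercised together when a client tears a channel down. A short sketch of that path follows (not part of this driver; the channel is assumed to have been obtained with dma_request_chan() or a similar helper).

/*
 * Client-side sketch (not part of this driver): chan is assumed to have
 * been obtained with dma_request_chan() or a similar helper.
 */
#include <linux/dmaengine.h>

static void client_teardown(struct dma_chan *chan)
{
	/* Calls at_xdmac_device_terminate_all() via the dmaengine core. */
	dmaengine_terminate_all(chan);

	/* Releasing the channel lets free_chan_resources() reclaim descriptors. */
	dma_release_channel(chan);
}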
1777 | ||
1778 | static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) | |
1779 | { | |
1780 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1781 | struct at_xdmac_desc *desc; | |
1782 | int i; | |
4c374fc7 | 1783 | unsigned long flags; |
e1f7c9ee | 1784 | |
4c374fc7 | 1785 | spin_lock_irqsave(&atchan->lock, flags); |
e1f7c9ee LD |
1786 | |
1787 | if (at_xdmac_chan_is_enabled(atchan)) { | |
1788 | dev_err(chan2dev(chan), | |
1789 | "can't allocate channel resources (channel enabled)\n"); | |
1790 | i = -EIO; | |
1791 | goto spin_unlock; | |
1792 | } | |
1793 | ||
1794 | if (!list_empty(&atchan->free_descs_list)) { | |
1795 | dev_err(chan2dev(chan), | |
1796 | "can't allocate channel resources (channel not free from a previous use)\n"); | |
1797 | i = -EIO; | |
1798 | goto spin_unlock; | |
1799 | } | |
1800 | ||
1801 | for (i = 0; i < init_nr_desc_per_channel; i++) { | |
1802 | desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC); | |
1803 | if (!desc) { | |
1804 | dev_warn(chan2dev(chan), | |
1805 | "only %d descriptors have been allocated\n", i); | |
1806 | break; | |
1807 | } | |
1808 | list_add_tail(&desc->desc_node, &atchan->free_descs_list); | |
1809 | } | |
1810 | ||
1811 | dma_cookie_init(chan); | |
1812 | ||
1813 | dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); | |
1814 | ||
1815 | spin_unlock: | |
4c374fc7 | 1816 | spin_unlock_irqrestore(&atchan->lock, flags); |
e1f7c9ee LD |
1817 | return i; |
1818 | } | |
1819 | ||
1820 | static void at_xdmac_free_chan_resources(struct dma_chan *chan) | |
1821 | { | |
1822 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1823 | struct at_xdmac *atxdmac = to_at_xdmac(chan->device); | |
1824 | struct at_xdmac_desc *desc, *_desc; | |
1825 | ||
1826 | list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { | |
1827 | dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc); | |
1828 | list_del(&desc->desc_node); | |
1829 | dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); | |
1830 | } | |
1831 | ||
1832 | return; | |
1833 | } | |
1834 | ||
e1f7c9ee LD |
1835 | #ifdef CONFIG_PM |
1836 | static int atmel_xdmac_prepare(struct device *dev) | |
1837 | { | |
1838 | struct platform_device *pdev = to_platform_device(dev); | |
1839 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | |
1840 | struct dma_chan *chan, *_chan; | |
1841 | ||
1842 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | |
1843 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1844 | ||
1845 | /* Wait for transfer completion, except in cyclic case. */ | |
1846 | if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan)) | |
1847 | return -EAGAIN; | |
1848 | } | |
1849 | return 0; | |
1850 | } | |
1851 | #else | |
1852 | # define atmel_xdmac_prepare NULL | |
1853 | #endif | |
1854 | ||
1855 | #ifdef CONFIG_PM_SLEEP | |
1856 | static int atmel_xdmac_suspend(struct device *dev) | |
1857 | { | |
1858 | struct platform_device *pdev = to_platform_device(dev); | |
1859 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | |
1860 | struct dma_chan *chan, *_chan; | |
1861 | ||
1862 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | |
1863 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | |
1864 | ||
734bb9a7 | 1865 | atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); |
e1f7c9ee LD |
1866 | if (at_xdmac_chan_is_cyclic(atchan)) { |
1867 | if (!at_xdmac_chan_is_paused(atchan)) | |
3d138877 | 1868 | at_xdmac_device_pause(chan); |
e1f7c9ee LD |
1869 | atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); |
1870 | atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); | |
1871 | atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); | |
1872 | } | |
1873 | } | |
1874 | atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM); | |
1875 | ||
1876 | at_xdmac_off(atxdmac); | |
1877 | clk_disable_unprepare(atxdmac->clk); | |
1878 | return 0; | |
1879 | } | |
1880 | ||
1881 | static int atmel_xdmac_resume(struct device *dev) | |
1882 | { | |
1883 | struct platform_device *pdev = to_platform_device(dev); | |
1884 | struct at_xdmac *atxdmac = platform_get_drvdata(pdev); | |
1885 | struct at_xdmac_chan *atchan; | |
1886 | struct dma_chan *chan, *_chan; | |
1887 | int i; | |
e1f7c9ee LD |
1888 | |
1889 | clk_prepare_enable(atxdmac->clk); | |
1890 | ||
1891 | /* Clear pending interrupts. */ | |
1892 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | |
1893 | atchan = &atxdmac->chan[i]; | |
1894 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | |
1895 | cpu_relax(); | |
1896 | } | |
1897 | ||
1898 | at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim); | |
1899 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); | |
1900 | list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { | |
1901 | atchan = to_at_xdmac_chan(chan); | |
734bb9a7 | 1902 | at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); |
e1f7c9ee | 1903 | if (at_xdmac_chan_is_cyclic(atchan)) { |
611dcadb SW |
1904 | if (at_xdmac_chan_is_paused(atchan)) |
1905 | at_xdmac_device_resume(chan); | |
e1f7c9ee LD |
1906 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); |
1907 | at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); | |
1908 | at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim); | |
1909 | wmb(); | |
1910 | at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask); | |
1911 | } | |
1912 | } | |
1913 | return 0; | |
1914 | } | |
1915 | #endif /* CONFIG_PM_SLEEP */ | |
1916 | ||
1917 | static int at_xdmac_probe(struct platform_device *pdev) | |
1918 | { | |
1919 | struct resource *res; | |
1920 | struct at_xdmac *atxdmac; | |
1921 | int irq, size, nr_channels, i, ret; | |
1922 | void __iomem *base; | |
1923 | u32 reg; | |
1924 | ||
1925 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1926 | if (!res) | |
1927 | return -EINVAL; | |
1928 | ||
1929 | irq = platform_get_irq(pdev, 0); | |
1930 | if (irq < 0) | |
1931 | return irq; | |
1932 | ||
1933 | base = devm_ioremap_resource(&pdev->dev, res); | |
1934 | if (IS_ERR(base)) | |
1935 | return PTR_ERR(base); | |
1936 | ||
1937 | /* | |
1938 | * Read the number of xdmac channels; the read helper function can't be used | |
1939 | * since atxdmac is not yet allocated and we need to know the number | |
1940 | * of channels to do the allocation. | |
1941 | */ | |
1942 | reg = readl_relaxed(base + AT_XDMAC_GTYPE); | |
1943 | nr_channels = AT_XDMAC_NB_CH(reg); | |
1944 | if (nr_channels > AT_XDMAC_MAX_CHAN) { | |
1945 | dev_err(&pdev->dev, "invalid number of channels (%u)\n", | |
1946 | nr_channels); | |
1947 | return -EINVAL; | |
1948 | } | |
1949 | ||
1950 | size = sizeof(*atxdmac); | |
1951 | size += nr_channels * sizeof(struct at_xdmac_chan); | |
1952 | atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); | |
1953 | if (!atxdmac) { | |
1954 | dev_err(&pdev->dev, "can't allocate at_xdmac structure\n"); | |
1955 | return -ENOMEM; | |
1956 | } | |
1957 | ||
1958 | atxdmac->regs = base; | |
1959 | atxdmac->irq = irq; | |
1960 | ||
1961 | atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk"); | |
1962 | if (IS_ERR(atxdmac->clk)) { | |
1963 | dev_err(&pdev->dev, "can't get dma_clk\n"); | |
1964 | return PTR_ERR(atxdmac->clk); | |
1965 | } | |
1966 | ||
1967 | /* Do not use devres here, to prevent races with the tasklet. */ | |
1968 | ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac); | |
1969 | if (ret) { | |
1970 | dev_err(&pdev->dev, "can't request irq\n"); | |
1971 | return ret; | |
1972 | } | |
1973 | ||
1974 | ret = clk_prepare_enable(atxdmac->clk); | |
1975 | if (ret) { | |
1976 | dev_err(&pdev->dev, "can't prepare or enable clock\n"); | |
1977 | goto err_free_irq; | |
1978 | } | |
1979 | ||
1980 | atxdmac->at_xdmac_desc_pool = | |
1981 | dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, | |
1982 | sizeof(struct at_xdmac_desc), 4, 0); | |
1983 | if (!atxdmac->at_xdmac_desc_pool) { | |
1984 | dev_err(&pdev->dev, "no memory for descriptors dma pool\n"); | |
1985 | ret = -ENOMEM; | |
1986 | goto err_clk_disable; | |
1987 | } | |
1988 | ||
1989 | dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); | |
6007ccb5 | 1990 | dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); |
e1f7c9ee | 1991 | dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); |
b206d9a2 | 1992 | dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); |
67a6eedc | 1993 | dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask); |
e1f7c9ee | 1994 | dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); |
fef4cbf2 LD |
1995 | /* |
1996 | * Without DMA_PRIVATE the driver is not able to allocate more than | |
1997 | * one channel; the second allocation fails in private_candidate. | |
1998 | */ | |
1999 | dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask); | |
e1f7c9ee LD |
2000 | atxdmac->dma.dev = &pdev->dev; |
2001 | atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources; | |
2002 | atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources; | |
2003 | atxdmac->dma.device_tx_status = at_xdmac_tx_status; | |
2004 | atxdmac->dma.device_issue_pending = at_xdmac_issue_pending; | |
2005 | atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; | |
6007ccb5 | 2006 | atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; |
e1f7c9ee | 2007 | atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; |
b206d9a2 | 2008 | atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; |
67a6eedc | 2009 | atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg; |
e1f7c9ee | 2010 | atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; |
3d138877 LD |
2011 | atxdmac->dma.device_config = at_xdmac_device_config; |
2012 | atxdmac->dma.device_pause = at_xdmac_device_pause; | |
2013 | atxdmac->dma.device_resume = at_xdmac_device_resume; | |
2014 | atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; | |
8ac82f88 LD |
2015 | atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; |
2016 | atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; | |
2017 | atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
2018 | atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
e1f7c9ee LD |
2019 | |
2020 | /* Disable all chans and interrupts. */ | |
2021 | at_xdmac_off(atxdmac); | |
2022 | ||
2023 | /* Init channels. */ | |
2024 | INIT_LIST_HEAD(&atxdmac->dma.channels); | |
2025 | for (i = 0; i < nr_channels; i++) { | |
2026 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | |
2027 | ||
2028 | atchan->chan.device = &atxdmac->dma; | |
2029 | list_add_tail(&atchan->chan.device_node, | |
2030 | &atxdmac->dma.channels); | |
2031 | ||
2032 | atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i); | |
2033 | atchan->mask = 1 << i; | |
2034 | ||
2035 | spin_lock_init(&atchan->lock); | |
2036 | INIT_LIST_HEAD(&atchan->xfers_list); | |
2037 | INIT_LIST_HEAD(&atchan->free_descs_list); | |
2038 | tasklet_init(&atchan->tasklet, at_xdmac_tasklet, | |
2039 | (unsigned long)atchan); | |
2040 | ||
2041 | /* Clear pending interrupts. */ | |
2042 | while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS)) | |
2043 | cpu_relax(); | |
2044 | } | |
2045 | platform_set_drvdata(pdev, atxdmac); | |
2046 | ||
2047 | ret = dma_async_device_register(&atxdmac->dma); | |
2048 | if (ret) { | |
2049 | dev_err(&pdev->dev, "fail to register DMA engine device\n"); | |
2050 | goto err_clk_disable; | |
2051 | } | |
2052 | ||
2053 | ret = of_dma_controller_register(pdev->dev.of_node, | |
2054 | at_xdmac_xlate, atxdmac); | |
2055 | if (ret) { | |
2056 | dev_err(&pdev->dev, "could not register of dma controller\n"); | |
2057 | goto err_dma_unregister; | |
2058 | } | |
2059 | ||
2060 | dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n", | |
2061 | nr_channels, atxdmac->regs); | |
2062 | ||
2063 | return 0; | |
2064 | ||
2065 | err_dma_unregister: | |
2066 | dma_async_device_unregister(&atxdmac->dma); | |
2067 | err_clk_disable: | |
2068 | clk_disable_unprepare(atxdmac->clk); | |
2069 | err_free_irq: | |
6a8b0c6b | 2070 | free_irq(atxdmac->irq, atxdmac); |
e1f7c9ee LD |
2071 | return ret; |
2072 | } | |
2073 | ||
2074 | static int at_xdmac_remove(struct platform_device *pdev) | |
2075 | { | |
2076 | struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev); | |
2077 | int i; | |
2078 | ||
2079 | at_xdmac_off(atxdmac); | |
2080 | of_dma_controller_free(pdev->dev.of_node); | |
2081 | dma_async_device_unregister(&atxdmac->dma); | |
2082 | clk_disable_unprepare(atxdmac->clk); | |
2083 | ||
6a8b0c6b | 2084 | free_irq(atxdmac->irq, atxdmac); |
e1f7c9ee LD |
2085 | |
2086 | for (i = 0; i < atxdmac->dma.chancnt; i++) { | |
2087 | struct at_xdmac_chan *atchan = &atxdmac->chan[i]; | |
2088 | ||
2089 | tasklet_kill(&atchan->tasklet); | |
2090 | at_xdmac_free_chan_resources(&atchan->chan); | |
2091 | } | |
2092 | ||
2093 | return 0; | |
2094 | } | |
2095 | ||
2096 | static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = { | |
2097 | .prepare = atmel_xdmac_prepare, | |
2098 | SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume) | |
2099 | }; | |
2100 | ||
2101 | static const struct of_device_id atmel_xdmac_dt_ids[] = { | |
2102 | { | |
2103 | .compatible = "atmel,sama5d4-dma", | |
2104 | }, { | |
2105 | /* sentinel */ | |
2106 | } | |
2107 | }; | |
2108 | MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids); | |
2109 | ||
2110 | static struct platform_driver at_xdmac_driver = { | |
2111 | .probe = at_xdmac_probe, | |
2112 | .remove = at_xdmac_remove, | |
2113 | .driver = { | |
2114 | .name = "at_xdmac", | |
e1f7c9ee LD |
2115 | .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), |
2116 | .pm = &atmel_xdmac_dev_pm_ops, | |
2117 | } | |
2118 | }; | |
2119 | ||
2120 | static int __init at_xdmac_init(void) | |
2121 | { | |
2122 | return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe); | |
2123 | } | |
2124 | subsys_initcall(at_xdmac_init); | |
2125 | ||
2126 | MODULE_DESCRIPTION("Atmel Extended DMA Controller driver"); | |
2127 | MODULE_AUTHOR("Ludovic Desroches <[email protected]>"); | |
2128 | MODULE_LICENSE("GPL"); |