// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <[email protected]>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x00
#define JZ_DMA_REG_DIRQP	0x04
#define JZ_DMA_REG_DDR		0x08
#define JZ_DMA_REG_DDRS		0x0c
#define JZ_DMA_REG_DCKE		0x10
#define JZ_DMA_REG_DCKES	0x14
#define JZ_DMA_REG_DCKEC	0x18
#define JZ_DMA_REG_DMACP	0x1c
#define JZ_DMA_REG_DSIRQP	0x20
#define JZ_DMA_REG_DSIRQM	0x24
#define JZ_DMA_REG_DCIRQP	0x28
#define JZ_DMA_REG_DCIRQM	0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	(n * 0x20)
#define JZ_DMA_REG_DSA		0x00
#define JZ_DMA_REG_DTA		0x04
#define JZ_DMA_REG_DTC		0x08
#define JZ_DMA_REG_DRT		0x0c
#define JZ_DMA_REG_DCS		0x10
#define JZ_DMA_REG_DCM		0x14
#define JZ_DMA_REG_DDA		0x18
#define JZ_DMA_REG_DSD		0x1c

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FAIC	BIT(27)
#define JZ_DMA_DMAC_FMSC	BIT(31)

#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET	0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT	BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA	BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM		BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC	BIT(3)
#define JZ_SOC_DATA_BREAK_LINKS		BIT(4)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	u32 dcm;
	u32 dsa;
	u32 dta;
	u32 dtc;
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC \
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	u32 transfer_type;
	u32 status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	u32 transfer_type_tx, transfer_type_rx;
	u32 transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	u32 chan_reserved;
	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	u32 transfer_type_tx, transfer_type_rx;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, u32 val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, u32 val)
{
	writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

static struct jz4780_dma_desc *
jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
		      enum dma_transaction_type type,
		      enum dma_transfer_direction direction)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;

	if (direction == DMA_DEV_TO_MEM)
		desc->transfer_type = jzchan->transfer_type_rx;
	else
		desc->transfer_type = jzchan->transfer_type_tx;

	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, u32 *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * 8-byte transfer sizes are unsupported, so fall back on 4. If it's
	 * larger than the maximum, just limit it. It is perfectly safe to fall
	 * back in this way since we won't exceed the maximum burst size
	 * supported by the device, the only effect is reduced efficiency. This
	 * is better than refusing to perform the request at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	u32 width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

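/*
 * Editorial note, added for this write-up and not part of the upstream
 * source: a worked example of the transfer-size selection above, using
 * made-up numbers. For a DMA_MEM_TO_DEV transfer with addr = 0x1000100,
 * len = 0x200, a 2-byte bus width and maxburst = 16, the value
 * addr | len | (width * maxburst) is 0x1000320. ffs(0x1000320) - 1 = 5, so
 * jz4780_dma_transfer_size() returns JZ_DMA_SIZE_32_BYTE and sets
 * transfer_shift = 5; the descriptor's DTC then becomes len >> 5 = 0x10,
 * i.e. sixteen 32-byte blocks for the 512-byte buffer.
 */
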
static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			/* Free the descriptor we just allocated. */
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1) &&
		    !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

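/*
 * Editorial note, added for this write-up and not part of the upstream
 * source: each struct jz4780_dma_hwdesc is 16 bytes, so the next-descriptor
 * offset encoded above reduces to the next index. For i = 0 the next
 * descriptor sits at offset 16 from the descriptor base, 16 >> 4 = 1, and
 * the value ORed into the top byte of DTC is 1 << 24; for i = 1 it is
 * 2 << 24, and so on. The cyclic variant below leaves the last descriptor's
 * offset at 0 so the controller wraps back to the first descriptor.
 */
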
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			/* Free the descriptor we just allocated. */
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	u32 tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	desc->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we work around this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->desc->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when DTCn reg contains the new transfer count. Setting
	 * it explicitly ensures residue is computed correctly at all times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration, it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

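/*
 * Editorial note, added for this write-up and not part of the upstream
 * source: a client driver would typically fill a struct dma_slave_config
 * before preparing a slave transfer. For example, for memory-to-device
 * writes to a hypothetical 32-bit peripheral FIFO at 0x10031010:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x10031010,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * 8-byte bus widths are rejected above because this controller only
 * supports 8-, 16- and 32-bit port widths.
 */
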
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}

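/*
 * Editorial note, added for this write-up and not part of the upstream
 * source: a worked residue example with made-up numbers. With
 * transfer_shift = 5 (32-byte blocks), three not-yet-started descriptors of
 * 0x10 blocks each, and a DTC readback of 0x4 blocks remaining in the
 * current descriptor, the residue reported to dmaengine is
 * (3 * 0x10 + 0x4) << 5 = 0x680 bytes.
 */
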
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	unsigned long residue = 0;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		goto out_unlock_irqrestore;

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
					jzchan->curr_hwdesc + 1);
	}
	dma_set_residue(txstate, residue);

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

out_unlock_irqrestore:
	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}

static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
				struct jz4780_dma_chan *jzchan)
{
	const unsigned int soc_flags = jzdma->soc_data->flags;
	struct jz4780_dma_desc *desc = jzchan->desc;
	u32 dcs;
	bool ack = true;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);

				jz4780_dma_begin(jzchan);
			} else if (dcs & JZ_DMA_DCS_TT) {
				if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
				    (jzchan->curr_hwdesc + 1 == desc->count)) {
					vchan_cookie_complete(&desc->vdesc);
					jzchan->desc = NULL;
				}

				jz4780_dma_begin(jzchan);
			} else {
				/* False positive - continue the transfer */
				ack = false;
				jz4780_dma_chn_writel(jzdma, jzchan->id,
						      JZ_DMA_REG_DCS,
						      JZ_DMA_DCS_CTE);
			}
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);

	return ack;
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	unsigned int nb_channels = jzdma->soc_data->nb_channels;
	unsigned long pending;
	u32 dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for_each_set_bit(i, &pending, nb_channels) {
		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
			pending &= ~BIT(i);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type_tx = data->transfer_type_tx;
	jzchan->transfer_type_rx = data->transfer_type_rx;

	return true;
}

static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count == 2) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[0];
		data.channel = dma_spec->args[1];
	} else if (dma_spec->args_count == 3) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[1];
		data.channel = dma_spec->args[2];
	} else {
		return NULL;
	}

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
					     ofdma->of_node);
	}
}

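/*
 * Editorial note, added for this write-up and not part of the upstream
 * source: an illustrative devicetree fragment matching the two specifier
 * forms parsed above, with a hypothetical request type 0x26 and the
 * "any channel" value 0xffffffff (which becomes -1 when assigned to the
 * signed channel field):
 *
 *	dmas = <&dma 0x26 0xffffffff>;		// 2 cells: type, channel
 *	dmas = <&dma 0x26 0x27 0xffffffff>;	// 3 cells: tx type, rx type, channel
 *
 * When the channel cell is a real index instead, that channel must also be
 * listed in the controller's "ingenic,reserved-channels" property.
 */
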
static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		return ret;
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional; if it doesn't exist, the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	/*
	 * The real segment size limit is dependent on the size unit selected
	 * for the transfer. Because the size unit is selected automatically
	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
	 */
	dma_set_max_seg_size(dev, 0xffffff);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_sg_burst = JZ_DMA_MAX_DESC;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	/*
	 * On JZ4760, chan0 won't enable properly the first time.
	 * Enabling then disabling chan1 will magically make chan0 work
	 * correctly.
	 */
	jz4780_dma_chan_enable(jzdma, 1);
	jz4780_dma_chan_disable(jzdma, 1);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_disable_clk;

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		goto err_disable_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_free_irq;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_free_irq;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_free_irq:
	free_irq(jzdma->irq, jzdma);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}

static void jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(jzdma->clk);
	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);
}

static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4755_dma_soc_data = {
	.nb_channels = 4,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data },
	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table = jz4780_dma_dt_match,
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");