/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define JZ_DMA_NR_CHANNELS	32

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x1000
#define JZ_DMA_REG_DIRQP	0x1004
#define JZ_DMA_REG_DDR		0x1008
#define JZ_DMA_REG_DDRS		0x100c
#define JZ_DMA_REG_DMACP	0x101c
#define JZ_DMA_REG_DSIRQP	0x1020
#define JZ_DMA_REG_DSIRQM	0x1024
#define JZ_DMA_REG_DCIRQP	0x1028
#define JZ_DMA_REG_DCIRQM	0x102c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
#define JZ_DMA_REG_DSA(n)	(0x00 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTA(n)	(0x04 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTC(n)	(0x08 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DRT(n)	(0x0c + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCS(n)	(0x10 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCM(n)	(0x14 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DDA(n)	(0x18 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DSD(n)	(0x1c + JZ_DMA_REG_CHAN(n))

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FMSC	BIT(31)

#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 * @sd: target/source stride difference (in stride transfer mode).
 * @drt: request type
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
	uint32_t sd;
	uint32_t drt;
	uint32_t reserved[2];
};
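
/*
 * Next-descriptor offsets in DTC are encoded in 16-byte units (see the
 * ">> 4" in the prep functions below), so the descriptor must stay a
 * multiple of 16 bytes in size. A compile-time check along these lines
 * (illustrative; not part of the original driver) would catch accidental
 * changes to the layout:
 */
_Static_assert(sizeof(struct jz4780_dma_hwdesc) % 16 == 0,
	       "hwdesc size must be a multiple of 16 bytes");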

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC		\
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
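/* With the usual 4 KiB pages and 32-byte descriptors this works out to 128. */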

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *base;
	struct clk *clk;
	unsigned int irq;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
};

struct jz4780_dma_filter_data {
	struct device_node *of_node;
	uint32_t transfer_type;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->base + reg);
}

static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->base + reg);
}

static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
{
	int ord = ffs(val) - 1;

	/*
	 * 8 byte transfer sizes are unsupported, so fall back on 4. If it's
	 * larger than the maximum, just limit it. It is perfectly safe to
	 * fall back in this way since we won't exceed the maximum burst size
	 * supported by the device; the only effect is reduced efficiency.
	 * This is better than refusing to perform the request at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > 7)
		ord = 7;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}
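
/*
 * Worked example for jz4780_dma_transfer_size() (illustrative numbers): for
 * addr = 0x2010, len = 0x40 and width * maxburst = 16, val is
 * 0x2010 | 0x40 | 0x10 = 0x2050, so ffs(val) - 1 = 4 and the helper picks a
 * 16-byte transfer size (JZ_DMA_SIZE_16_BYTE) with *shift = 4 - the largest
 * power of two that divides every operand.
 */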

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;
		desc->drt = jzchan->transfer_type;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;
		desc->drt = jzchan->transfer_type;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

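	/*
	 * No conversion is needed for the 1 and 2 byte cases below because
	 * DMA_SLAVE_BUSWIDTH_1_BYTE (1) and DMA_SLAVE_BUSWIDTH_2_BYTES (2)
	 * happen to match the hardware encodings JZ_DMA_WIDTH_8_BIT (0x1)
	 * and JZ_DMA_WIDTH_16_BIT (0x2).
	 */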
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
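			 * With 32-byte descriptors, for example, descriptor
			 * i + 1 sits at byte offset (i + 1) * 32, which is
			 * encoded below as (i + 1) * 2.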
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to
		 * issue an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	uint32_t tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(dest | src | len,
				       &jzchan->transfer_shift);

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].drt = JZ_DMA_DRT_AUTO;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we work around this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Use 8-word descriptors. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
			  JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
	if (jzchan->desc) {
		jz4780_dma_desc_free(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration; it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int residue, count;
	unsigned int i;

	residue = 0;

	for (i = next_sg; i < desc->count; i++)
		residue += desc->desc[i].dtc << jzchan->transfer_shift;

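	/*
	 * If the hardware has already started on this descriptor list
	 * (next_sg != 0), the channel's DTC register holds the block count
	 * still outstanding for the descriptor currently in flight, so fold
	 * that in as well.
	 */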
	if (next_sg != 0) {
		count = jz4780_dma_readl(jzdma,
					 JZ_DMA_REG_DTC(jzchan->id));
		residue += count << jzchan->transfer_shift;
	}

	return residue;
}

static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		return status;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		txstate->residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
		txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
			  (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
	} else {
		txstate->residue = 0;
	}

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}

static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	uint32_t dcs;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);
			} else {
				vchan_cookie_complete(&jzchan->desc->vdesc);
				jzchan->desc = NULL;
			}

			jz4780_dma_begin(jzchan);
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	uint32_t pending, dmac;
	int i;

	pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		if (!(pending & (1 << i)))
			continue;

		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (jzdma->dma_device.dev->of_node != data->of_node)
		return false;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}

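/*
 * Translate a two-cell DT DMA specifier (first cell: hardware request/
 * transfer type, second cell: channel number) into a channel. As an
 * illustrative consumer binding (example values, not from this file),
 * something like
 *
 *	dmas = <&dma 0x14 0xffffffff>;
 *
 * would request transfer type 0x14 on any unreserved channel, since
 * 0xffffffff reads back as -1 once stored in the int "channel" field.
 */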
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.of_node = ofdma->of_node;
	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= JZ_DMA_NR_CHANNELS) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type = data.transfer_type;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
	}
}

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	platform_set_drvdata(pdev, jzdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(jzdma->base))
		return PTR_ERR(jzdma->base);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", ret);
		return ret;
	}

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		return ret;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional; if it doesn't exist, the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
			  JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_disable_clk;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_unregister_dev;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_unregister_dev:
	dma_async_device_unregister(dd);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);

err_free_irq:
	free_irq(jzdma->irq, jzdma);
	return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);

	dma_async_device_unregister(&jzdma->dma_device);
	return 0;
}

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4780-dma", .data = NULL },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");