/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING				BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)		((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)		((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)		((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)		((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)		((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)		((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)		((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)		(type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM			0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT			(0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR			0
#define SUN4I_NDMA_ADDR_MODE_IO				1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE			BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)			((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE			BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE			BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR			0
#define SUN4I_DDMA_ADDR_MODE_IO				1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE		2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE		3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM			0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT			(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY				BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE			BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE			BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN		BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE			BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)		(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)		(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)		(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)		(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG			0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG		0x4
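
/*
 * In both IRQ registers, each pchan n owns two consecutive bits: bit
 * 2n flags "half done" and bit 2n + 1 flags "end of transfer" (see
 * set_pchan_interrupt() and sun4i_dma_interrupt() below).
 */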

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)			(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG				0x0
#define SUN4I_NDMA_SRC_ADDR_REG				0x4
#define SUN4I_NDMA_DST_ADDR_REG				0x8
#define SUN4I_NDMA_BYTE_COUNT_REG			0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)			(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG				0x0
#define SUN4I_DDMA_SRC_ADDR_REG				0x4
#define SUN4I_DDMA_DST_ADDR_REG				0x8
#define SUN4I_DDMA_BYTE_COUNT_REG			0xC
#define SUN4I_DDMA_PARA_REG				0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21
 * respectively. Given that each Normal DMA endpoint other than SDRAM
 * can be used as tx/rx, that's 28 * 2 + 1 = 57 NDMA vchans, plus the
 * 21 DDMA ones: 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS \
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS \
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)

/*
 * This set of SUN4I_DDMA timing parameters was found experimentally
 * while working with the SPI driver and seems to make it behave
 * correctly
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) | \
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) | \
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
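
/*
 * That is: a data block size of 1 and two wait cycles on both the
 * source and destination side; with the shifts above, the whole macro
 * evaluates to the register value 0x00010001.
 */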

struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

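/*
 * A promise holds everything needed to program a pchan for one piece
 * of a transfer: addresses, length, the cfg register value and, for
 * dedicated DMA, the para timing word (see configure_pchan())
 */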
struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	int				is_cyclic;
};

struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device		slave;
	struct sun4i_dma_pchan		*pchans;
	struct sun4i_dma_vchan		*vchans;
	void __iomem			*base;
	struct clk			*clk;
	int				irq;
	spinlock_t			lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
	return (addr_width >> 1);
}

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type
	 * SUN4I_DDMA has an extra field with timing parameters
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}

/**
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"processing something to this endpoint already\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}

/**
 * Generate a promise, to be used in a normal DMA contract.
 *
 * An NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		       SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/**
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed.
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		       SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/**
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}

/**
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}

/**
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so
	 * choose the best one so we get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	if (vchan->is_dedicated) {
		/*
		 * So far cyclic transfers have only been used for audio
		 * data, which Normal DMA handles fine. There is nothing
		 * stopping us from supporting dedicated DMA here as
		 * well, so if a client comes up and requires it, it
		 * will be simple to implement.
		 */
		dev_err(chan2dev(chan),
			"Cyclic transfers are only supported on Normal DMA\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	/* Figure out the endpoints and the address we need */
	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to program the engine half as often as if we
	 * didn't use it, making two periods out of each promise (keep
	 * in mind the hardware doesn't support linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 * |---|---|---|---| (periods / promises)
	 * P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 * |-------|-------| (promises as configured on hw)
	 * |---|---|---|---| (periods)
	 * P   I   I,P I   I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 */
	nr_periods = DIV_ROUND_UP(len / period_len, 2);
	for (i = 0; i < nr_periods; i++) {
		/* Calculate the offset in the buffer and the length needed */
		offset = i * period_len * 2;
		plength = min((len - offset), (period_len * 2));
		if (dir == DMA_MEM_TO_DEV)
			src = buf + offset;
		else
			dest = buf + offset;

		/* Make the promise */
		promise = generate_ndma_promise(chan, src, dest,
						plength, sconfig, dir);
		if (!promise) {
			/* TODO: should we free everything? */
			return NULL;
		}
		promise->cfg |= endpoints;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise)
			return NULL; /* TODO: should we free everything? */

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_dma_desc_free_list(&vchan->vc, &head);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}

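/*
 * Consumers request a channel through two cells: the first selects
 * normal (0) or dedicated (1) DMA, the second the endpoint/DRQ number.
 * A device tree node would look roughly like this (a sketch; the
 * endpoint number is made up for illustration):
 *
 *	dmas = <&dma 1 27>;
 */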
static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * quantity. If possible, replace the first listed element's
	 * full size with the actual remaining amount
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}

exit:

	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_del(&vchan->processing->list);
			list_add_tail(&vchan->processing->list,
				      &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to run the thread after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}

	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid too long a loop
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	struct resource *res;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return priv->irq;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status = sun4i_dma_tx_status;
	priv->slave.device_issue_pending = sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config = sun4i_dma_config;
	priv->slave.device_terminate_all = sun4i_dma_terminate_all;
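	/*
	 * copy_align is an alignment shift (1 << 2 = 4 byte alignment),
	 * matching the 4-byte bus width this driver picks for memcpy
	 */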
	priv->slave.copy_align = 2;
	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
				 BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove	= sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
MODULE_AUTHOR("Emilio López <[email protected]>");
MODULE_LICENSE("GPL");