/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	 channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	 channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

#define TXX9_DMA_MAX_COUNT		0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

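/*
 * Grab a descriptor for a new transfer: reuse the first ACKed entry on
 * the channel's free list, or fall back to a fresh GFP_ATOMIC allocation
 * so the caller never sleeps.
 */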
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
	mmiowb();
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMAC supports SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	dma_async_tx_callback callback;
	void *param;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	callback = txd->callback;
	param = txd->callback_param;

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
	dma_run_dependencies(txd);
}

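/*
 * Move descriptors from dc->queue onto @list, writing each hardware
 * descriptor's CHAR with the physical address of the next one so the
 * controller can follow the chain.  The chain is cut after a descriptor
 * that requested DMA_PREP_INTERRUPT if txx9dmac_chan_INTENT() reports
 * that the per-descriptor interrupt is not enabled, so the
 * chain-completion interrupt fires at that point instead.
 */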
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

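/*
 * Called with dc->lock held.  Compare the channel's current CHAR with
 * each descriptor on the active list (and its children): everything
 * before the in-progress descriptor is completed, an ABCHC status is
 * routed to error handling, and a fully idle channel completes the
 * whole list.
 */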
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

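/*
 * Per-channel bottom half, used when each channel has its own IRQ line:
 * the hard interrupt handler below schedules this tasklet with the IRQ
 * disabled, and the tasklet scans the descriptor lists before
 * re-enabling the IRQ.
 */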
static void txx9dmac_chan_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = (struct txx9dmac_chan *)data;
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static void txx9dmac_tasklet(unsigned long data)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data;
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

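/*
 * dma_async_tx_descriptor tx_submit hook: assign a cookie and park the
 * descriptor on dc->queue.  The transfer is not started here; it is
 * kicked off later, typically from txx9dmac_issue_pending().
 */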
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

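/*
 * Build a memcpy transaction as a chain of hardware descriptors, each
 * covering at most TXX9_DMA_MAX_COUNT bytes, with the chunk size trimmed
 * slightly where the TX49 errata workaround below requires it.
 */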
static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit.  If we put all descriptors on active_list,
		 * calling of callback on the completion will be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *prev;
	struct txx9dmac_desc *first;
	unsigned int i;
	struct scatterlist *sg;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	BUG_ON(!ds || !ds->reg_width);
	if (ds->tx_reg)
		BUG_ON(direction != DMA_MEM_TO_DEV);
	else
		BUG_ON(direction != DMA_DEV_TO_MEM);
	if (unlikely(!sg_len))
		return NULL;

	prev = first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		struct txx9dmac_desc *desc;
		dma_addr_t mem;
		u32 sai, dai;

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		mem = sg_dma_address(sg);

		if (__is_dmac64(ddev)) {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc.SAR = mem;
				desc->hwdesc.DAR = ds->tx_reg;
			} else {
				desc->hwdesc.SAR = ds->rx_reg;
				desc->hwdesc.DAR = mem;
			}
			desc->hwdesc.CNTR = sg_dma_len(sg);
		} else {
			if (direction == DMA_MEM_TO_DEV) {
				desc->hwdesc32.SAR = mem;
				desc->hwdesc32.DAR = ds->tx_reg;
			} else {
				desc->hwdesc32.SAR = ds->rx_reg;
				desc->hwdesc32.DAR = mem;
			}
			desc->hwdesc32.CNTR = sg_dma_len(sg);
		}
		if (direction == DMA_MEM_TO_DEV) {
			sai = ds->reg_width;
			dai = 0;
		} else {
			sai = 0;
			dai = ds->reg_width;
		}
		txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
					dc->ccr | TXX9_DMA_CCR_XFACT);

		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys,
					ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
			prev->txd.phys, ddev->descsize,
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = 0;

	return &first->txd;
}

static int txx9dmac_terminate_all(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "terminate_all\n");
	spin_lock_bh(&dc->lock);

	txx9dmac_reset_chan(dc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dc->queue, &list);
	list_splice_init(&dc->active_list, &list);

	spin_unlock_bh(&dc->lock);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);

	return 0;
}

static enum dma_status
txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return DMA_COMPLETE;

	spin_lock_bh(&dc->lock);
	txx9dmac_scan_descriptors(dc);
	spin_unlock_bh(&dc->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

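/*
 * Append newly queued descriptors to the tail of a chain that may still
 * be running (dynamic chaining with SMPCHN).  If the channel has already
 * stopped at the old tail, restart it by rewriting CHAR.
 */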
static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
				   struct txx9dmac_desc *prev)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	LIST_HEAD(list);

	prev = txx9dmac_last_child(prev);
	txx9dmac_dequeue(dc, &list);
	desc = list_entry(list.next, struct txx9dmac_desc, desc_node);
	desc_write_CHAR(dc, prev, desc->txd.phys);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);
	mmiowb();
	if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
	    channel_read_CHAR(dc) == prev->txd.phys)
		/* Restart chain DMA */
		channel_write_CHAR(dc, desc->txd.phys);
	list_splice_tail(&list, &dc->active_list);
}

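/*
 * dmaengine issue_pending hook: reap anything already finished, then
 * either start the channel with the queued descriptors or, if a chain is
 * still active and SMPCHN is available, splice the new work onto it.
 */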
static void txx9dmac_issue_pending(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);

	spin_lock_bh(&dc->lock);

	if (!list_empty(&dc->active_list))
		txx9dmac_scan_descriptors(dc);
	if (!list_empty(&dc->queue)) {
		if (list_empty(&dc->active_list)) {
			txx9dmac_dequeue(dc, &dc->active_list);
			txx9dmac_dostart(dc, txx9dmac_first_active(dc));
		} else if (txx9_dma_have_SMPCHN()) {
			struct txx9dmac_desc *prev = txx9dmac_last_active(dc);

			if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
			    txx9dmac_chan_INTENT(dc))
				txx9dmac_chain_dynamic(dc, prev);
		}
	}

	spin_unlock_bh(&dc->lock);
}

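/*
 * dmaengine alloc_chan_resources hook: derive the channel's CCR value
 * from whether it serves memcpy or slave transfers, then pre-allocate up
 * to TXX9_DMA_INITIAL_DESC_COUNT descriptors on the free list.
 */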
static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_slave *ds = chan->private;
	struct txx9dmac_desc *desc;
	int i;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
	txx9dmac_chan_set_SMPCHN(dc);
	if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
		dc->ccr |= TXX9_DMA_CCR_INTENC;
	if (chan->device->device_prep_dma_memcpy) {
		if (ds)
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
	} else {
		if (!ds ||
		    (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
			return -EINVAL;
		dc->ccr |= TXX9_DMA_CCR_EXTRQ |
			TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
		txx9dmac_chan_set_INTENT(dc);
	}

	spin_lock_bh(&dc->lock);
	i = dc->descs_allocated;
	while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
		spin_unlock_bh(&dc->lock);

		desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_bh(&dc->lock);
			break;
		}
		txx9dmac_desc_put(dc, desc);

		spin_lock_bh(&dc->lock);
		i = ++dc->descs_allocated;
	}
	spin_unlock_bh(&dc->lock);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void txx9dmac_free_chan_resources(struct dma_chan *chan)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
		dc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dc->active_list));
	BUG_ON(!list_empty(&dc->queue));
	BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);

	spin_lock_bh(&dc->lock);
	list_splice_init(&dc->free_list, &list);
	dc->descs_allocated = 0;
	spin_unlock_bh(&dc->lock);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				 ddev->descsize, DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/*----------------------------------------------------------------------*/

static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
	dma_writel(ddev, MCR, 0);
	mmiowb();
}

static int __init txx9dmac_chan_probe(struct platform_device *pdev)
{
	struct txx9dmac_chan_platform_data *cpdata =
			dev_get_platdata(&pdev->dev);
	struct platform_device *dmac_dev = cpdata->dmac_dev;
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
	struct txx9dmac_chan *dc;
	int err;
	int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
	int irq;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->dma.dev = &pdev->dev;
	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
	dc->dma.device_terminate_all = txx9dmac_terminate_all;
	dc->dma.device_tx_status = txx9dmac_tx_status;
	dc->dma.device_issue_pending = txx9dmac_issue_pending;
	if (pdata && pdata->memcpy_chan == ch) {
		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
		dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
	} else {
		dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
		dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
		dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
	}

	INIT_LIST_HEAD(&dc->dma.channels);
	dc->ddev = platform_get_drvdata(dmac_dev);
	if (dc->ddev->irq < 0) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;
		tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet,
				(unsigned long)dc);
		dc->irq = irq;
		err = devm_request_irq(&pdev->dev, dc->irq,
			txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
		if (err)
			return err;
	} else
		dc->irq = -1;
	dc->ddev->chan[ch] = dc;
	dc->chan.device = &dc->dma;
	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
	dma_cookie_init(&dc->chan);

	if (is_dmac64(dc))
		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
	else
		dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
	spin_lock_init(&dc->lock);

	INIT_LIST_HEAD(&dc->active_list);
	INIT_LIST_HEAD(&dc->queue);
	INIT_LIST_HEAD(&dc->free_list);

	txx9dmac_reset_chan(dc);

	platform_set_drvdata(pdev, dc);

	err = dma_async_device_register(&dc->dma);
	if (err)
		return err;
	dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
		dc->dma.dev_id,
		dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
		dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");

	return 0;
}

static int txx9dmac_chan_remove(struct platform_device *pdev)
{
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
	if (dc->irq >= 0)
		tasklet_kill(&dc->tasklet);
	dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
	return 0;
}

static int __init txx9dmac_probe(struct platform_device *pdev)
{
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *io;
	struct txx9dmac_dev *ddev;
	u32 mcr;
	int err;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
				     dev_name(&pdev->dev)))
		return -EBUSY;

	ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
	if (!ddev->regs)
		return -ENOMEM;
	ddev->have_64bit_regs = pdata->have_64bit_regs;
	if (__is_dmac64(ddev))
		ddev->descsize = sizeof(struct txx9dmac_hwdesc);
	else
		ddev->descsize = sizeof(struct txx9dmac_hwdesc32);

	/* force dma off, just in case */
	txx9dmac_off(ddev);

	ddev->irq = platform_get_irq(pdev, 0);
	if (ddev->irq >= 0) {
		tasklet_init(&ddev->tasklet, txx9dmac_tasklet,
				(unsigned long)ddev);
		err = devm_request_irq(&pdev->dev, ddev->irq,
			txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
		if (err)
			return err;
	}

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int txx9dmac_remove(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	if (ddev->irq >= 0)
		tasklet_kill(&ddev->tasklet);
	return 0;
}

static void txx9dmac_shutdown(struct platform_device *pdev)
{
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
}

static int txx9dmac_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);

	txx9dmac_off(ddev);
	return 0;
}

static int txx9dmac_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
	struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u32 mcr;

	mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
	if (pdata && pdata->memcpy_chan >= 0)
		mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;

}

static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
	.suspend_noirq = txx9dmac_suspend_noirq,
	.resume_noirq = txx9dmac_resume_noirq,
};

static struct platform_driver txx9dmac_chan_driver = {
	.remove		= txx9dmac_chan_remove,
	.driver = {
		.name	= "txx9dmac-chan",
	},
};

static struct platform_driver txx9dmac_driver = {
	.remove		= txx9dmac_remove,
	.shutdown	= txx9dmac_shutdown,
	.driver = {
		.name	= "txx9dmac",
		.pm	= &txx9dmac_dev_pm_ops,
	},
};

static int __init txx9dmac_init(void)
{
	int rc;

	rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
	if (!rc) {
		rc = platform_driver_probe(&txx9dmac_chan_driver,
					   txx9dmac_chan_probe);
		if (rc)
			platform_driver_unregister(&txx9dmac_driver);
	}
	return rc;
}
module_init(txx9dmac_init);

static void __exit txx9dmac_exit(void)
{
	platform_driver_unregister(&txx9dmac_chan_driver);
	platform_driver_unregister(&txx9dmac_driver);
}
module_exit(txx9dmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 DMA Controller driver");
MODULE_AUTHOR("Atsushi Nemoto <[email protected]>");
MODULE_ALIAS("platform:txx9dmac");
MODULE_ALIAS("platform:txx9dmac-chan");