/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 * Derived in part from arch/arm/mach-sa1100/dma.c,
 * Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000
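
/*
 * DMA_ALIGN is used as a bit mask: buffer addresses and chunk lengths
 * must be word (4-byte) aligned.  DMA_MAX_SIZE (0x1fff) is the largest
 * length programmed into a single hardware buffer, presumably matching
 * the width of the DBTA/DBTB transfer-count field.
 */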

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
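
/*
 * DDAR layout as used below: bit 0 selects the transfer direction
 * (write to or read from the device), bit 1 the endianness, bit 2 the
 * burst size and bit 3 the device data width (8 or 16 bit).  Bits 4-7
 * select the on-chip peripheral; the device FIFO address itself is
 * merged in later by sa11x0_dma_slave_config().
 */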

struct sa11x0_dma_sg {
	u32 addr;
	u32 len;
};

struct sa11x0_dma_desc {
	struct dma_async_tx_descriptor tx;
	u32 ddar;
	size_t size;

	/* maybe protected by c->lock */
	struct list_head node;
	unsigned sglen;
	struct sa11x0_dma_sg sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct dma_chan chan;
	spinlock_t lock;
	dma_cookie_t lc;

	/* protected by c->lock */
	struct sa11x0_dma_phy *phy;
	enum dma_status status;
	struct list_head desc_submitted;
	struct list_head desc_issued;

	/* protected by d->lock */
	struct list_head node;

	u32 ddar;
	const char *name;
};

struct sa11x0_dma_phy {
	void __iomem *base;
	struct sa11x0_dma_dev *dev;
	unsigned num;

	struct sa11x0_dma_chan *vchan;

	/* Protected by c->lock */
	unsigned sg_load;
	struct sa11x0_dma_desc *txd_load;
	unsigned sg_done;
	struct sa11x0_dma_desc *txd_done;
#ifdef CONFIG_PM_SLEEP
	u32 dbs[2];
	u32 dbt[2];
	u32 dcsr;
#endif
};
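
/*
 * Each physical channel double-buffers using the hardware's A and B
 * buffer register pairs.  txd_load/sg_load track the descriptor and sg
 * entry that will be programmed into the next free buffer, while
 * txd_done/sg_done track the descriptor whose completion interrupts we
 * are still waiting for.  Both pairs may point at the same descriptor.
 */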

struct sa11x0_dma_dev {
	struct dma_device slave;
	void __iomem *base;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head chan_pending;
	struct list_head desc_complete;
	struct sa11x0_dma_phy phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sa11x0_dma_desc, tx);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	if (list_empty(&c->desc_issued))
		return NULL;

	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, txd, txd->tx.cookie, txd->ddar);
}

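/*
 * Load the next scatterlist entry of the current (or next compatible)
 * descriptor into whichever hardware buffer (A or B) is free, based on
 * the BIU and STRTA/STRTB status bits, then start it with IE and RUN
 * set.  Called with the channel lock held, either from the tasklet or
 * from interrupt context.
 */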
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

		/*
		 * We have reached the end of the current descriptor.
		 * Peek at the next descriptor, and if compatible with
		 * the current, start processing it.
		 */
		if (txn && txn->ddar == txd->ddar) {
			txd = txn;
			sa11x0_dma_start_desc(p, txn);
		} else {
			p->txd_load = NULL;
			return;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

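/*
 * Called from the interrupt handler, with the channel lock held, when
 * one of the DONE bits fires.  Counts the completed sg entry; once the
 * whole descriptor has finished it is moved to the device's
 * desc_complete list and the tasklet is scheduled, then the freed
 * hardware buffer is refilled via sa11x0_dma_start_sg().
 */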
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		struct sa11x0_dma_dev *d = p->dev;

		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
			p->num, p->txd_done, p->txd_done->tx.cookie);

		c->lc = txd->tx.cookie;

		spin_lock(&d->lock);
		list_add_tail(&txd->node, &d->desc_complete);
		spin_unlock(&d->lock);

		p->sg_done = 0;
		p->txd_done = p->txd_load;

		tasklet_schedule(&d->task);
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware. This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
			(DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

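/*
 * The tasklet runs with interrupts enabled and does the bulk of the
 * housekeeping: it retires descriptors queued on desc_complete, frees
 * physical channels whose virtual channel has nothing left to issue,
 * hands free physical channels to pending virtual channels, and
 * finally frees the completed descriptors and runs their callbacks
 * outside of any spinlock.
 */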
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	struct sa11x0_dma_desc *txd, *txn;
	LIST_HEAD(head);
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	/* Get the completed tx descriptors */
	spin_lock_irq(&d->lock);
	list_splice_init(&d->desc_complete, &head);
	spin_unlock_irq(&d->lock);

	list_for_each_entry(txd, &head, node) {
		c = to_sa11x0_dma_chan(txd->tx.chan);

		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
			c, txd, txd->tx.cookie);

		spin_lock_irq(&c->lock);
		p = c->phy;
		if (p) {
			if (!p->txd_done)
				sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->lock);
		}
	}

	/* Now free the completed tx descriptors and call their callbacks */
	list_for_each_entry_safe(txd, txn, &head, node) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
			txd, txd->tx.cookie);

		kfree(txd);

		if (callback)
			callback(callback_param);
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}


static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
{
	struct sa11x0_dma_desc *txd, *txn;

	list_for_each_entry_safe(txd, txn, head, node) {
		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
		kfree(txd);
	}
}

static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->lock, flags);
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	list_splice_tail_init(&c->desc_submitted, &head);
	list_splice_tail_init(&c->desc_issued, &head);
	spin_unlock_irqrestore(&c->lock, flags);

	sa11x0_dma_desc_free(d, &head);
}

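/*
 * Return the current DMA position for the physical channel by reading
 * the source address of whichever buffer (A or B) the hardware is
 * currently working on, as indicated by the BIU and STRT status bits.
 */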
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_desc *txd;
	dma_cookie_t last_used, last_complete;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	last_used = c->chan.cookie;
	last_complete = c->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(state, last_complete, last_used, 0);
		return ret;
	}

	spin_lock_irqsave(&c->lock, flags);
	p = c->phy;
	ret = c->status;
	if (p) {
		dma_addr_t addr = sa11x0_dma_pos(p);

		dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

		txd = p->txd_done;
		if (txd) {
			unsigned i;

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		if (txd != p->txd_load && p->txd_load)
			bytes += p->txd_load->size;
	}
	list_for_each_entry(txd, &c->desc_issued, node) {
		bytes += txd->size;
	}
	spin_unlock_irqrestore(&c->lock, flags);

	dma_set_tx_state(state, last_complete, last_used, bytes);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
	if (!list_empty(&c->desc_issued)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			list_add_tail(&c->node, &d->chan_pending);
			tasklet_schedule(&d->task);
			dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
	spin_unlock_irqrestore(&c->lock, flags);
}

static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
	struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->chan.cookie += 1;
	if (c->chan.cookie < 0)
		c->chan.cookie = 1;
	txd->tx.cookie = c->chan.cookie;

	list_add_tail(&txd->node, &c->desc_submitted);
	spin_unlock_irqrestore(&c->lock, flags);

	dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
		c, txd, txd->tx.cookie);

	return txd->tx.cookie;
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			c, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				c, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit. If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment. This avoids small segments.
			 */
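			/*
			 * For example, a 20000 byte segment is split
			 * over three passes into chunks of 6664, 6668
			 * and 6668 bytes: each chunk stays word aligned
			 * and below DMA_MAX_SIZE.
			 */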
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	dma_async_tx_descriptor_init(&txd->tx, &c->chan);
	txd->tx.flags = flags;
	txd->tx.tx_submit = sa11x0_dma_tx_submit;
	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
		c, txd, txd->size, txd->sglen);

	return &txd->tx;
}

static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		c, addr, width, maxburst);

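	/*
	 * Pack the device FIFO address into DDAR: the top nibble of the
	 * physical address is kept in place and the remaining word
	 * address bits are shifted up above the control bits, which is
	 * how the hardware appears to expect the device address to be
	 * encoded.
	 */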
	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->lock, flags);
		list_splice_tail_init(&c->desc_submitted, &head);
		list_splice_tail_init(&c->desc_issued, &head);

		p = c->phy;
		if (p) {
			struct sa11x0_dma_desc *txd, *txn;

			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
				DCSR_STRTA | DCSR_DONEA |
				DCSR_STRTB | DCSR_DONEB,
				p->base + DMA_DCSR_C);

			list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
				if (txd->tx.chan == &c->chan)
					list_move(&txd->node, &head);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->lock, flags);
		sa11x0_dma_desc_free(d, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
		spin_lock_irqsave(&c->lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};
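
/*
 * The names in this table double as the channel identifiers that
 * client drivers pass to sa11x0_dma_filter_fn() when requesting a
 * channel.
 */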

static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->chan.device = dmadev;
		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		spin_lock_init(&c->lock);
		INIT_LIST_HEAD(&c->desc_submitted);
		INIT_LIST_HEAD(&c->desc_issued);
		INIT_LIST_HEAD(&c->node);
		list_add_tail(&c->chan.device_node, &dmadev->channels);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);
	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
		list_del(&c->chan.device_node);
		kfree(c);
	}
}

static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	INIT_LIST_HEAD(&d->desc_complete);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
err_ioremap:
	kfree(d);
err_alloc:
	return ret;
}

static int __devexit sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
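/*
 * Suspend saves the two buffer register pairs in "next to complete
 * first" order, swapping the A and B register pairs (and the
 * corresponding STRT bits) according to the BIU status bit, so that
 * resume can always restore starting with buffer A regardless of where
 * the channel was stopped.
 */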
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name = "sa11x0-dma",
		.owner = THIS_MODULE,
		.pm = &sa11x0_dma_pm_ops,
	},
	.probe = sa11x0_dma_probe,
	.remove = __devexit_p(sa11x0_dma_remove),
};

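/*
 * Filter function for dma_request_channel().  A client selects a
 * channel by name, for example (sketch only, error handling omitted):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 */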
bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");